1 /*
2  * Texas Instruments System Control Interface Driver
3  *   Based on Linux and U-Boot implementation
4  *
5  * Copyright (C) 2018-2024 Texas Instruments Incorporated - https://www.ti.com/
6  *
7  * SPDX-License-Identifier: BSD-3-Clause
8  */
9 
10 #include <errno.h>
11 #include <stdbool.h>
12 #include <stddef.h>
13 #include <string.h>
14 
15 #include <platform_def.h>
16 #include <lib/bakery_lock.h>
17 
18 #include <common/debug.h>
19 #include <sec_proxy.h>
20 
21 #include "ti_sci_protocol.h"
22 #include "ti_sci.h"
23 
24 #if USE_COHERENT_MEM
25 __section(".tzfw_coherent_mem")
26 #endif
27 static uint8_t message_sequence;
28 
29 DEFINE_BAKERY_LOCK(ti_sci_xfer_lock);
30 
31 /**
32  * struct ti_sci_xfer - Structure representing a message flow
33  * @tx_message:	Transmit message
34  * @rx_message:	Receive message
35  */
36 struct ti_sci_xfer {
37 	struct k3_sec_proxy_msg tx_message;
38 	struct k3_sec_proxy_msg rx_message;
39 };
40 
41 /**
42  * ti_sci_setup_one_xfer() - Setup one message type
43  *
44  * @msg_type:	Message type
45  * @msg_flags:	Flag to set for the message
46  * @tx_buf:	Buffer to be sent to mailbox channel
47  * @tx_message_size: transmit message size
48  * @rx_buf:	Buffer to be received from mailbox channel
49  * @rx_message_size: receive message size
 * @xfer:	Transfer structure to fill in
50  *
51  * Helper function which is used by various command functions that are
52  * exposed to clients of this driver for allocating a message traffic event.
53  *
54  * Return: 0 if all goes well, else appropriate error message
55  */
56 static int ti_sci_setup_one_xfer(uint16_t msg_type, uint32_t msg_flags,
57 				 void *tx_buf,
58 				 size_t tx_message_size,
59 				 void *rx_buf,
60 				 size_t rx_message_size,
61 				 struct ti_sci_xfer *xfer)
62 {
63 	struct ti_sci_msg_hdr *hdr;
64 
65 	/* Ensure we have sane transfer sizes */
66 	if (rx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
67 	    tx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
68 	    tx_message_size < sizeof(*hdr))
69 		return -ERANGE;
70 
71 	hdr = (struct ti_sci_msg_hdr *)tx_buf;
72 	hdr->seq = ++message_sequence;
73 	hdr->type = msg_type;
74 	hdr->host = TI_SCI_HOST_ID;
75 	hdr->flags = msg_flags;
76 	/* Request a response if rx_message_size is non-zero */
77 	if (rx_message_size != 0U) {
78 		hdr->flags |= TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
79 	}
80 
81 	xfer->tx_message.buf = tx_buf;
82 	xfer->tx_message.len = tx_message_size;
83 
84 	xfer->rx_message.buf = rx_buf;
85 	xfer->rx_message.len = rx_message_size;
86 
87 	return 0;
88 }
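
/*
 * Illustrative sketch (not part of the driver): every command wrapper in
 * this file follows the same pattern around ti_sci_setup_one_xfer() and
 * ti_sci_do_xfer(). The request/response types below stand in for any pair
 * declared in ti_sci_protocol.h:
 *
 *	struct ti_sci_msg_req_set_device_state req;
 *	struct ti_sci_msg_hdr resp;
 *	struct ti_sci_xfer xfer;
 *	int ret;
 *
 *	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, 0,
 *				    &req, sizeof(req),
 *				    &resp, sizeof(resp),
 *				    &xfer);
 *	if (ret != 0)
 *		return ret;
 *
 *	req.id = 42U;				(fill the payload fields)
 *	req.state = MSG_DEVICE_SW_STATE_ON;
 *
 *	return ti_sci_do_xfer(&xfer);
 */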
89 
90 /**
91  * ti_sci_get_response() - Receive response from mailbox channel
92  *
93  * @msg:	Message buffer that receives the response
94  * @chan:	Channel to receive the response
95  *
96  * Return: 0 if all goes well, else appropriate error message
97  */
98 static int ti_sci_get_response(struct k3_sec_proxy_msg *msg,
99 			       enum k3_sec_proxy_chan_id chan)
100 {
101 	struct ti_sci_msg_hdr *hdr;
102 	unsigned int retry = 5;
103 	int ret;
104 
105 	for (; retry > 0; retry--) {
106 		/* Receive the response */
107 		ret = k3_sec_proxy_recv(chan, msg);
108 		if (ret) {
109 			ERROR("Message receive failed (%d)\n", ret);
110 			return ret;
111 		}
112 
113 		/* msg is updated by Secure Proxy driver */
114 		hdr = (struct ti_sci_msg_hdr *)msg->buf;
115 
116 		/* Sanity check for message response */
117 		if (hdr->seq == message_sequence)
118 			break;
119 		else
120 			WARN("Message with sequence ID %u is not expected\n", hdr->seq);
121 	}
122 	if (!retry) {
123 		ERROR("Timed out waiting for message\n");
124 		return -EINVAL;
125 	}
126 
127 	if (msg->len > TI_SCI_MAX_MESSAGE_SIZE) {
128 		ERROR("Unable to handle %lu xfer (max %d)\n",
129 		      msg->len, TI_SCI_MAX_MESSAGE_SIZE);
130 		return -EINVAL;
131 	}
132 
133 	if (!(hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK))
134 		return -ENODEV;
135 
136 	return 0;
137 }
138 
139 /**
140  * ti_sci_do_xfer() - Do one transfer
141  *
142  * @xfer:	Transfer to initiate and wait for response
143  *
144  * Return: 0 if all goes well, else appropriate error message
145  */
146 static int ti_sci_do_xfer(struct ti_sci_xfer *xfer)
147 {
148 	struct k3_sec_proxy_msg *tx_msg = &xfer->tx_message;
149 	struct k3_sec_proxy_msg *rx_msg = &xfer->rx_message;
150 	int ret;
151 
152 	bakery_lock_get(&ti_sci_xfer_lock);
153 
154 	/* Clear any spurious messages in receive queue */
155 	ret = k3_sec_proxy_clear_rx_thread(SP_RESPONSE);
156 	if (ret) {
157 		ERROR("Could not clear response queue (%d)\n", ret);
158 		goto unlock;
159 	}
160 
161 	/* Send the message */
162 	ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, tx_msg);
163 	if (ret) {
164 		ERROR("Message sending failed (%d)\n", ret);
165 		goto unlock;
166 	}
167 
168 	/* Get the response if requested */
169 	if (rx_msg->len != 0U) {
170 		ret = ti_sci_get_response(rx_msg, SP_RESPONSE);
171 		if (ret != 0U) {
172 			ERROR("Failed to get response (%d)\n", ret);
173 			goto unlock;
174 		}
175 	}
176 
177 unlock:
178 	bakery_lock_release(&ti_sci_xfer_lock);
179 
180 	return ret;
181 }
182 
183 /**
184  * ti_sci_get_revision() - Get the revision of the SCI entity
185  *
186  * Fills the caller-provided structure with the SCI version information.
187  *
188  * @version: Structure containing the version info
189  *
190  * Return: 0 if all goes well, else appropriate error message
191  */
192 int ti_sci_get_revision(struct ti_sci_msg_version *version)
193 {
194 	struct ti_sci_msg_resp_version rev_info;
195 	struct ti_sci_msg_hdr hdr;
196 	struct ti_sci_xfer xfer;
197 	int ret;
198 
199 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_VERSION, 0x0,
200 				    &hdr, sizeof(hdr),
201 				    &rev_info, sizeof(rev_info),
202 				    &xfer);
203 	if (ret) {
204 		ERROR("Message alloc failed (%d)\n", ret);
205 		return ret;
206 	}
207 
208 	ret = ti_sci_do_xfer(&xfer);
209 	if (ret) {
210 		ERROR("Transfer send failed (%d)\n", ret);
211 		return ret;
212 	}
213 
214 	memcpy(version->firmware_description, rev_info.firmware_description,
215 		sizeof(rev_info.firmware_description));
216 	version->abi_major = rev_info.abi_major;
217 	version->abi_minor = rev_info.abi_minor;
218 	version->firmware_revision = rev_info.firmware_revision;
219 	version->sub_version = rev_info.sub_version;
220 	version->patch_version = rev_info.patch_version;
221 
222 	return 0;
223 }
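
/*
 * Example (illustrative only): querying and logging the System Firmware
 * version, e.g. from early platform setup code.
 *
 *	struct ti_sci_msg_version version;
 *
 *	if (ti_sci_get_revision(&version) == 0) {
 *		INFO("SYSFW ABI: %u.%u (firmware rev 0x%04x '%s')\n",
 *		     version.abi_major, version.abi_minor,
 *		     version.firmware_revision,
 *		     version.firmware_description);
 *	}
 */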
224 
225 /**
226  * ti_sci_query_fw_caps() - Get the FW/SoC capabilities
228  * @fw_caps:		Each bit in fw_caps indicates one FW/SoC capability
229  *
230  * Return: 0 if all went well, else returns appropriate error value.
231  */
232 int ti_sci_query_fw_caps(uint64_t *fw_caps)
233 {
234 	struct ti_sci_msg_hdr req;
235 	struct ti_sci_msg_resp_query_fw_caps resp;
236 
237 	struct ti_sci_xfer xfer;
238 	int ret;
239 
240 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_FW_CAPS, 0,
241 				    &req, sizeof(req),
242 				    &resp, sizeof(resp),
243 				    &xfer);
244 	if (ret != 0U) {
245 		ERROR("Message alloc failed (%d)\n", ret);
246 		return ret;
247 	}
248 
249 	ret = ti_sci_do_xfer(&xfer);
250 	if (ret != 0U) {
251 		ERROR("Transfer send failed (%d)\n", ret);
252 		return ret;
253 	}
254 
255 	if (fw_caps)
256 		*fw_caps = resp.fw_caps;
257 
258 	return 0;
259 }
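
/*
 * Example (illustrative): testing a single capability bit. The macro name
 * below is only an assumption for this sketch; the real capability masks are
 * defined alongside TI_SCI_MSG_QUERY_FW_CAPS in ti_sci_protocol.h.
 *
 *	uint64_t fw_caps = 0U;
 *	bool lpm_capable = false;
 *
 *	if (ti_sci_query_fw_caps(&fw_caps) == 0)
 *		lpm_capable = (fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) != 0U;
 */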
260 
261 /**
262  * ti_sci_device_set_state() - Set device state
263  *
264  * @id:		Device identifier
265  * @flags:	flags to setup for the device
266  * @state:	State to move the device to
267  *
268  * Return: 0 if all goes well, else appropriate error message
269  */
270 static int ti_sci_device_set_state(uint32_t id, uint32_t flags, uint8_t state)
271 {
272 	struct ti_sci_msg_req_set_device_state req;
273 	struct ti_sci_msg_hdr resp;
274 
275 	struct ti_sci_xfer xfer;
276 	int ret;
277 
278 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, flags,
279 				    &req, sizeof(req),
280 				    &resp, sizeof(resp),
281 				    &xfer);
282 	if (ret) {
283 		ERROR("Message alloc failed (%d)\n", ret);
284 		return ret;
285 	}
286 
287 	req.id = id;
288 	req.state = state;
289 
290 	ret = ti_sci_do_xfer(&xfer);
291 	if (ret) {
292 		ERROR("Transfer send failed (%d)\n", ret);
293 		return ret;
294 	}
295 
296 	return 0;
297 }
298 
299 /**
300  * ti_sci_device_get_state() - Get device state
301  *
302  * @id:		Device Identifier
303  * @clcnt:	Pointer to Context Loss Count
304  * @resets:	pointer to the reset bit field to populate
305  * @p_state:	pointer to the programmed (requested) device state
306  * @c_state:	pointer to the current device state
307  *
308  * Return: 0 if all goes well, else appropriate error message
309  */
310 static int ti_sci_device_get_state(uint32_t id, uint32_t *clcnt,
311 				   uint32_t *resets, uint8_t *p_state,
312 				   uint8_t *c_state)
313 {
314 	struct ti_sci_msg_req_get_device_state req;
315 	struct ti_sci_msg_resp_get_device_state resp;
316 
317 	struct ti_sci_xfer xfer;
318 	int ret;
319 
320 	if (!clcnt && !resets && !p_state && !c_state)
321 		return -EINVAL;
322 
323 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_DEVICE_STATE, 0,
324 				    &req, sizeof(req),
325 				    &resp, sizeof(resp),
326 				    &xfer);
327 	if (ret) {
328 		ERROR("Message alloc failed (%d)\n", ret);
329 		return ret;
330 	}
331 
332 	req.id = id;
333 
334 	ret = ti_sci_do_xfer(&xfer);
335 	if (ret) {
336 		ERROR("Transfer send failed (%d)\n", ret);
337 		return ret;
338 	}
339 
340 	if (clcnt)
341 		*clcnt = resp.context_loss_count;
342 	if (resets)
343 		*resets = resp.resets;
344 	if (p_state)
345 		*p_state = resp.programmed_state;
346 	if (c_state)
347 		*c_state = resp.current_state;
348 
349 	return 0;
350 }
351 
352 /**
353  * ti_sci_device_get() - Request for device managed by TISCI
354  *
355  * @id:		Device Identifier
356  *
357  * Request for the device - NOTE: the client MUST maintain integrity of
358  * usage count by balancing get_device with put_device. No refcounting is
359  * managed by driver for that purpose.
360  *
361  * Return: 0 if all goes well, else appropriate error message
362  */
363 int ti_sci_device_get(uint32_t id)
364 {
365 	return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_ON);
366 }
367 
368 /**
369  * ti_sci_device_get_exclusive() - Exclusive request for device managed by TISCI
370  *
371  * @id:		Device Identifier
372  *
373  * Request for the device - NOTE: the client MUST maintain integrity of
374  * usage count by balancing get_device with put_device. No refcounting is
375  * managed by driver for that purpose.
376  *
377  * NOTE: This _exclusive version of the get API is for exclusive access to the
378  * device. Any other host in the system will fail to get this device after this
379  * call until exclusive access is released with device_put or a non-exclusive
380  * set call.
381  *
382  * Return: 0 if all goes well, else appropriate error message
383  */
384 int ti_sci_device_get_exclusive(uint32_t id)
385 {
386 	return ti_sci_device_set_state(id,
387 				       MSG_FLAG_DEVICE_EXCLUSIVE,
388 				       MSG_DEVICE_SW_STATE_ON);
389 }
390 
391 /**
392  * ti_sci_device_idle() - Idle a device managed by TISCI
393  *
394  * @id:		Device Identifier
395  *
396  * Request for the device - NOTE: the client MUST maintain integrity of
397  * usage count by balancing get_device with put_device. No refcounting is
398  * managed by driver for that purpose.
399  *
400  * Return: 0 if all goes well, else appropriate error message
401  */
402 int ti_sci_device_idle(uint32_t id)
403 {
404 	return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_RETENTION);
405 }
406 
407 /**
408  * ti_sci_device_idle_exclusive() - Exclusive idle a device managed by TISCI
409  *
410  * @id:		Device Identifier
411  *
412  * Request for the device - NOTE: the client MUST maintain integrity of
413  * usage count by balancing get_device with put_device. No refcounting is
414  * managed by driver for that purpose.
415  *
416  * NOTE: This _exclusive version of the idle API is for exclusive access to
417  * the device. Any other host in the system will fail to get this device after
418  * this call until exclusive access is released with device_put or a
419  * non-exclusive set call.
420  *
421  * Return: 0 if all goes well, else appropriate error message
422  */
423 int ti_sci_device_idle_exclusive(uint32_t id)
424 {
425 	return ti_sci_device_set_state(id,
426 				       MSG_FLAG_DEVICE_EXCLUSIVE,
427 				       MSG_DEVICE_SW_STATE_RETENTION);
428 }
429 
430 /**
431  * ti_sci_device_put() - Release a device managed by TISCI
432  *
433  * @id:		Device Identifier
434  *
435  * Request for the device - NOTE: the client MUST maintain integrity of
436  * usage count by balancing get_device with put_device. No refcounting is
437  * managed by driver for that purpose.
438  *
439  * Return: 0 if all goes well, else appropriate error message
440  */
441 int ti_sci_device_put(uint32_t id)
442 {
443 	return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
444 }
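
/*
 * Example (illustrative): get/put must stay balanced because, as noted
 * above, this driver keeps no reference count. The device ID 0 is only a
 * placeholder.
 *
 *	int ret;
 *
 *	ret = ti_sci_device_get(0);
 *	if (ret != 0)
 *		return ret;
 *
 *	(access the now-powered peripheral here)
 *
 *	ret = ti_sci_device_put(0);
 */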
445 
446 /**
447  * ti_sci_device_put_no_wait() - Release a device without requesting or waiting
448  *				 for a response.
449  *
450  * @id:		Device Identifier
451  *
452  * Request for the device - NOTE: the client MUST maintain integrity of
453  * usage count by balancing get_device with put_device. No refcounting is
454  * managed by driver for that purpose.
455  *
456  * Return: 0 if all goes well, else appropriate error message
457  */
458 int ti_sci_device_put_no_wait(uint32_t id)
459 {
460 	struct ti_sci_msg_req_set_device_state req;
461 	struct ti_sci_xfer xfer;
462 	int ret;
463 
464 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, 0,
465 				    &req, sizeof(req),
466 				    NULL, 0,
467 				    &xfer);
468 	if (ret != 0U) {
469 		ERROR("Message alloc failed (%d)\n", ret);
470 		return ret;
471 	}
472 
473 	req.id = id;
474 	req.state = MSG_DEVICE_SW_STATE_AUTO_OFF;
475 
476 	ret = ti_sci_do_xfer(&xfer);
477 	if (ret != 0U) {
478 		ERROR("Transfer send failed (%d)\n", ret);
479 		return ret;
480 	}
481 
482 	return 0;
483 }
484 
485 /**
486  * ti_sci_device_is_valid() - Is the device valid
487  *
488  * @id:		Device Identifier
489  *
490  * Return: 0 if all goes well and the device ID is valid, else return
491  *         appropriate error
492  */
493 int ti_sci_device_is_valid(uint32_t id)
494 {
495 	uint8_t unused;
496 
497 	/* check the device state which will also tell us if the ID is valid */
498 	return ti_sci_device_get_state(id, NULL, NULL, NULL, &unused);
499 }
500 
501 /**
502  * ti_sci_device_get_clcnt() - Get context loss counter
503  *
504  * @id:		Device Identifier
505  * @count:	Pointer to Context Loss counter to populate
506  *
507  * Return: 0 if all goes well, else appropriate error message
508  */
509 int ti_sci_device_get_clcnt(uint32_t id, uint32_t *count)
510 {
511 	return ti_sci_device_get_state(id, count, NULL, NULL, NULL);
512 }
513 
514 /**
515  * ti_sci_device_is_idle() - Check if the device is requested to be idle
516  *
517  * @id:		Device Identifier
518  * @r_state:	true if requested to be idle
519  *
520  * Return: 0 if all goes well, else appropriate error message
521  */
522 int ti_sci_device_is_idle(uint32_t id, bool *r_state)
523 {
524 	int ret;
525 	uint8_t state;
526 
527 	if (!r_state)
528 		return -EINVAL;
529 
530 	ret = ti_sci_device_get_state(id, NULL, NULL, &state, NULL);
531 	if (ret)
532 		return ret;
533 
534 	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
535 
536 	return 0;
537 }
538 
539 /**
540  * ti_sci_device_is_stop() - Check if the device is requested to be stopped
541  *
542  * @id:		Device Identifier
543  * @r_state:	true if requested to be stopped
544  * @curr_state:	true if currently stopped
545  *
546  * Return: 0 if all goes well, else appropriate error message
547  */
548 int ti_sci_device_is_stop(uint32_t id, bool *r_state, bool *curr_state)
549 {
550 	int ret;
551 	uint8_t p_state, c_state;
552 
553 	if (!r_state && !curr_state)
554 		return -EINVAL;
555 
556 	ret = ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
557 	if (ret)
558 		return ret;
559 
560 	if (r_state)
561 		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
562 	if (curr_state)
563 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
564 
565 	return 0;
566 }
567 
568 /**
569  * ti_sci_device_is_on() - Check if the device is requested to be ON
570  *
571  * @id:		Device Identifier
572  * @r_state:	true if requested to be ON
573  * @curr_state:	true if currently ON and active
574  *
575  * Return: 0 if all goes well, else appropriate error message
576  */
577 int ti_sci_device_is_on(uint32_t id, bool *r_state, bool *curr_state)
578 {
579 	int ret;
580 	uint8_t p_state, c_state;
581 
582 	if (!r_state && !curr_state)
583 		return -EINVAL;
584 
585 	ret = ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
587 	if (ret)
588 		return ret;
589 
590 	if (r_state)
591 		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
592 	if (curr_state)
593 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
594 
595 	return 0;
596 }
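
/*
 * Example (illustrative): confirming that a device was both requested ON and
 * has actually reached the ON state before touching its registers. dev_id
 * stands for any valid device identifier.
 *
 *	bool requested = false, on = false;
 *
 *	if ((ti_sci_device_is_on(dev_id, &requested, &on) == 0) &&
 *	    requested && on) {
 *		(the device is powered and clocked)
 *	}
 */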
597 
598 /**
599  * ti_sci_device_is_trans() - Check if the device is currently transitioning
600  *
601  * @id:		Device Identifier
602  * @curr_state:	true if currently transitioning
603  *
604  * Return: 0 if all goes well, else appropriate error message
605  */
606 int ti_sci_device_is_trans(uint32_t id, bool *curr_state)
607 {
608 	int ret;
609 	uint8_t state;
610 
611 	if (!curr_state)
612 		return -EINVAL;
613 
614 	ret = ti_sci_device_get_state(id, NULL, NULL, NULL, &state);
615 	if (ret)
616 		return ret;
617 
618 	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
619 
620 	return 0;
621 }
622 
623 /**
624  * ti_sci_device_set_resets() - Set resets for device managed by TISCI
625  *
626  * @id:			Device Identifier
627  * @reset_state:	Device specific reset bit field
628  *
629  * Return: 0 if all goes well, else appropriate error message
630  */
631 int ti_sci_device_set_resets(uint32_t id, uint32_t reset_state)
632 {
633 	struct ti_sci_msg_req_set_device_resets req;
634 	struct ti_sci_msg_hdr resp;
635 
636 	struct ti_sci_xfer xfer;
637 	int ret;
638 
639 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_RESETS, 0,
640 				    &req, sizeof(req),
641 				    &resp, sizeof(resp),
642 				    &xfer);
643 	if (ret) {
644 		ERROR("Message alloc failed (%d)\n", ret);
645 		return ret;
646 	}
647 
648 	req.id = id;
649 	req.resets = reset_state;
650 
651 	ret = ti_sci_do_xfer(&xfer);
652 	if (ret) {
653 		ERROR("Transfer send failed (%d)\n", ret);
654 		return ret;
655 	}
656 
657 	return 0;
658 }
659 
660 /**
661  * ti_sci_device_get_resets() - Get reset state for device managed by TISCI
662  *
663  * @id:			Device Identifier
664  * @reset_state:	Pointer to reset state to populate
665  *
666  * Return: 0 if all goes well, else appropriate error message
667  */
668 int ti_sci_device_get_resets(uint32_t id, uint32_t *reset_state)
669 {
670 	return ti_sci_device_get_state(id, NULL, reset_state, NULL, NULL);
671 }
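
/*
 * Example (illustrative): asserting one device-specific reset line with a
 * read-modify-write of the reset bit field. Bit 0 is only a placeholder;
 * the meaning of each bit is defined per device by System Firmware.
 *
 *	uint32_t resets = 0U;
 *	int ret;
 *
 *	ret = ti_sci_device_get_resets(dev_id, &resets);
 *	if (ret == 0)
 *		ret = ti_sci_device_set_resets(dev_id, resets | 0x1U);
 */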
672 
673 /**
674  * ti_sci_clock_set_state() - Set clock state helper
675  *
676  * @dev_id:	Device identifier this request is for
677  * @clk_id:	Clock identifier for the device for this request,
678  *		Each device has its own set of clock inputs, This indexes
679  *		which clock input to modify
680  * @flags:	Header flags as needed
681  * @state:	State to request for the clock
682  *
683  * Return: 0 if all goes well, else appropriate error message
684  */
685 int ti_sci_clock_set_state(uint32_t dev_id, uint8_t clk_id,
686 			   uint32_t flags, uint8_t state)
687 {
688 	struct ti_sci_msg_req_set_clock_state req;
689 	struct ti_sci_msg_hdr resp;
690 
691 	struct ti_sci_xfer xfer;
692 	int ret;
693 
694 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_STATE, flags,
695 				    &req, sizeof(req),
696 				    &resp, sizeof(resp),
697 				    &xfer);
698 	if (ret) {
699 		ERROR("Message alloc failed (%d)\n", ret);
700 		return ret;
701 	}
702 
703 	req.dev_id = dev_id;
704 	req.clk_id = clk_id;
705 	req.request_state = state;
706 
707 	ret = ti_sci_do_xfer(&xfer);
708 	if (ret) {
709 		ERROR("Transfer send failed (%d)\n", ret);
710 		return ret;
711 	}
712 
713 	return 0;
714 }
715 
716 /**
717  * ti_sci_clock_get_state() - Get clock state helper
718  *
719  * @dev_id:	Device identifier this request is for
720  * @clk_id:	Clock identifier for the device for this request.
721  *		Each device has its own set of clock inputs. This indexes
722  *		which clock input to modify.
723  * @programmed_state:	State requested for clock to move to
724  * @current_state:	State that the clock is currently in
725  *
726  * Return: 0 if all goes well, else appropriate error message
727  */
728 int ti_sci_clock_get_state(uint32_t dev_id, uint8_t clk_id,
729 			   uint8_t *programmed_state,
730 			   uint8_t *current_state)
731 {
732 	struct ti_sci_msg_req_get_clock_state req;
733 	struct ti_sci_msg_resp_get_clock_state resp;
734 
735 	struct ti_sci_xfer xfer;
736 	int ret;
737 
738 	if (!programmed_state && !current_state)
739 		return -EINVAL;
740 
741 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_STATE, 0,
742 				    &req, sizeof(req),
743 				    &resp, sizeof(resp),
744 				    &xfer);
745 	if (ret) {
746 		ERROR("Message alloc failed (%d)\n", ret);
747 		return ret;
748 	}
749 
750 	req.dev_id = dev_id;
751 	req.clk_id = clk_id;
752 
753 	ret = ti_sci_do_xfer(&xfer);
754 	if (ret) {
755 		ERROR("Transfer send failed (%d)\n", ret);
756 		return ret;
757 	}
758 
759 	if (programmed_state)
760 		*programmed_state = resp.programmed_state;
761 	if (current_state)
762 		*current_state = resp.current_state;
763 
764 	return 0;
765 }
766 
767 /**
768  * ti_sci_clock_get() - Get control of a clock from TI SCI
769  *
770  * @dev_id:	Device identifier this request is for
771  * @clk_id:	Clock identifier for the device for this request.
772  *		Each device has its own set of clock inputs. This indexes
773  *		which clock input to modify.
774  * @needs_ssc: 'true' iff Spread Spectrum clock is desired
775  * @can_change_freq: 'true' iff frequency change is desired
776  * @enable_input_term: 'true' iff input termination is desired
777  *
778  * Return: 0 if all goes well, else appropriate error message
779  */
780 int ti_sci_clock_get(uint32_t dev_id, uint8_t clk_id,
781 		     bool needs_ssc, bool can_change_freq,
782 		     bool enable_input_term)
783 {
784 	uint32_t flags = 0;
785 
786 	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
787 	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
788 	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
789 
790 	return ti_sci_clock_set_state(dev_id, clk_id, flags,
791 				      MSG_CLOCK_SW_STATE_REQ);
792 }
793 
794 /**
795  * ti_sci_clock_idle() - Idle a clock which is in our control
796  *
797  * @dev_id:	Device identifier this request is for
798  * @clk_id:	Clock identifier for the device for this request.
799  *		Each device has its own set of clock inputs. This indexes
800  *		which clock input to modify.
801  *
802  * NOTE: This clock must have been requested by get_clock previously.
803  *
804  * Return: 0 if all goes well, else appropriate error message
805  */
806 int ti_sci_clock_idle(uint32_t dev_id, uint8_t clk_id)
807 {
808 	return ti_sci_clock_set_state(dev_id, clk_id, 0,
809 				      MSG_CLOCK_SW_STATE_UNREQ);
810 }
811 
812 /**
813  * ti_sci_clock_put() - Release a clock from our control
814  *
815  * @dev_id:	Device identifier this request is for
816  * @clk_id:	Clock identifier for the device for this request.
817  *		Each device has its own set of clock inputs. This indexes
818  *		which clock input to modify.
819  *
820  * NOTE: This clock must have been requested by get_clock previously.
821  *
822  * Return: 0 if all goes well, else appropriate error message
823  */
824 int ti_sci_clock_put(uint32_t dev_id, uint8_t clk_id)
825 {
826 	return ti_sci_clock_set_state(dev_id, clk_id, 0,
827 				      MSG_CLOCK_SW_STATE_AUTO);
828 }
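
/*
 * Example (illustrative): a minimal clock lifecycle. The clock is requested
 * with no SSC, frequency-change or input-termination hints, used, and then
 * handed back to automatic management.
 *
 *	int ret;
 *
 *	ret = ti_sci_clock_get(dev_id, clk_id, false, false, false);
 *	if (ret != 0)
 *		return ret;
 *
 *	(clocked accesses happen here)
 *
 *	ret = ti_sci_clock_put(dev_id, clk_id);
 */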
829 
830 /**
831  * ti_sci_clock_is_auto() - Is the clock being auto managed
832  *
833  * @dev_id:	Device identifier this request is for
834  * @clk_id:	Clock identifier for the device for this request.
835  *		Each device has its own set of clock inputs. This indexes
836  *		which clock input to modify.
837  * @req_state: state indicating if the clock is auto managed
838  *
839  * Return: 0 if all goes well, else appropriate error message
840  */
841 int ti_sci_clock_is_auto(uint32_t dev_id, uint8_t clk_id, bool *req_state)
842 {
843 	uint8_t state = 0;
844 	int ret;
845 
846 	if (!req_state)
847 		return -EINVAL;
848 
849 	ret = ti_sci_clock_get_state(dev_id, clk_id, &state, NULL);
850 	if (ret)
851 		return ret;
852 
853 	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
854 
855 	return 0;
856 }
857 
858 /**
859  * ti_sci_clock_is_on() - Is the clock ON
860  *
861  * @dev_id:	Device identifier this request is for
862  * @clk_id:	Clock identifier for the device for this request.
863  *		Each device has its own set of clock inputs. This indexes
864  *		which clock input to modify.
865  * @req_state: state indicating if the clock is managed by us and enabled
866  * @curr_state: state indicating if the clock is ready for operation
867  *
868  * Return: 0 if all goes well, else appropriate error message
869  */
870 int ti_sci_clock_is_on(uint32_t dev_id, uint8_t clk_id,
871 		       bool *req_state, bool *curr_state)
872 {
873 	uint8_t c_state = 0, r_state = 0;
874 	int ret;
875 
876 	if (!req_state && !curr_state)
877 		return -EINVAL;
878 
879 	ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
880 	if (ret)
881 		return ret;
882 
883 	if (req_state)
884 		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
885 	if (curr_state)
886 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
887 
888 	return 0;
889 }
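
/*
 * Example (illustrative): checking that a previously requested clock has
 * actually become ready before relying on it.
 *
 *	bool requested = false, ready = false;
 *
 *	if ((ti_sci_clock_is_on(dev_id, clk_id, &requested, &ready) == 0) &&
 *	    ready) {
 *		(the clock is running)
 *	}
 */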
890 
891 /**
892  * ti_sci_clock_is_off() - Is the clock OFF
893  *
894  * @dev_id:	Device identifier this request is for
895  * @clk_id:	Clock identifier for the device for this request.
896  *		Each device has its own set of clock inputs. This indexes
897  *		which clock input to modify.
898  * @req_state: state indicating if the clock is managed by us and disabled
899  * @curr_state: state indicating if the clock is NOT ready for operation
900  *
901  * Return: 0 if all goes well, else appropriate error message
902  */
903 int ti_sci_clock_is_off(uint32_t dev_id, uint8_t clk_id,
904 			bool *req_state, bool *curr_state)
905 {
906 	uint8_t c_state = 0, r_state = 0;
907 	int ret;
908 
909 	if (!req_state && !curr_state)
910 		return -EINVAL;
911 
912 	ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
913 	if (ret)
914 		return ret;
915 
916 	if (req_state)
917 		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
918 	if (curr_state)
919 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
920 
921 	return 0;
922 }
923 
924 /**
925  * ti_sci_clock_set_parent() - Set the clock source of a specific device clock
926  *
927  * @dev_id:	Device identifier this request is for
928  * @clk_id:	Clock identifier for the device for this request.
929  *		Each device has its own set of clock inputs. This indexes
930  *		which clock input to modify.
931  * @parent_id:	Parent clock identifier to set
932  *
933  * Return: 0 if all goes well, else appropriate error message
934  */
935 int ti_sci_clock_set_parent(uint32_t dev_id, uint8_t clk_id, uint8_t parent_id)
936 {
937 	struct ti_sci_msg_req_set_clock_parent req;
938 	struct ti_sci_msg_hdr resp;
939 
940 	struct ti_sci_xfer xfer;
941 	int ret;
942 
943 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_PARENT, 0,
944 				    &req, sizeof(req),
945 				    &resp, sizeof(resp),
946 				    &xfer);
947 	if (ret) {
948 		ERROR("Message alloc failed (%d)\n", ret);
949 		return ret;
950 	}
951 
952 	req.dev_id = dev_id;
953 	req.clk_id = clk_id;
954 	req.parent_id = parent_id;
955 
956 	ret = ti_sci_do_xfer(&xfer);
957 	if (ret) {
958 		ERROR("Transfer send failed (%d)\n", ret);
959 		return ret;
960 	}
961 
962 	return 0;
963 }
964 
965 /**
966  * ti_sci_clock_get_parent() - Get current parent clock source
967  *
968  * @dev_id:	Device identifier this request is for
969  * @clk_id:	Clock identifier for the device for this request.
970  *		Each device has its own set of clock inputs. This indexes
971  *		which clock input to modify.
972  * @parent_id:	Current clock parent
973  *
974  * Return: 0 if all goes well, else appropriate error message
975  */
976 int ti_sci_clock_get_parent(uint32_t dev_id, uint8_t clk_id, uint8_t *parent_id)
977 {
978 	struct ti_sci_msg_req_get_clock_parent req;
979 	struct ti_sci_msg_resp_get_clock_parent resp;
980 
981 	struct ti_sci_xfer xfer;
982 	int ret;
983 
984 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_PARENT, 0,
985 				    &req, sizeof(req),
986 				    &resp, sizeof(resp),
987 				    &xfer);
988 	if (ret) {
989 		ERROR("Message alloc failed (%d)\n", ret);
990 		return ret;
991 	}
992 
993 	req.dev_id = dev_id;
994 	req.clk_id = clk_id;
995 
996 	ret = ti_sci_do_xfer(&xfer);
997 	if (ret) {
998 		ERROR("Transfer send failed (%d)\n", ret);
999 		return ret;
1000 	}
1001 
1002 	*parent_id = resp.parent_id;
1003 
1004 	return 0;
1005 }
1006 
1007 /**
1008  * ti_sci_clock_get_num_parents() - Get num parents of the current clk source
1009  *
1010  * @dev_id:	Device identifier this request is for
1011  * @clk_id:	Clock identifier for the device for this request.
1012  *		Each device has its own set of clock inputs. This indexes
1013  *		which clock input to modify.
1014  * @num_parents: Returns the number of parents of the current clock.
1015  *
1016  * Return: 0 if all goes well, else appropriate error message
1017  */
1018 int ti_sci_clock_get_num_parents(uint32_t dev_id, uint8_t clk_id,
1019 				 uint8_t *num_parents)
1020 {
1021 	struct ti_sci_msg_req_get_clock_num_parents req;
1022 	struct ti_sci_msg_resp_get_clock_num_parents resp;
1023 
1024 	struct ti_sci_xfer xfer;
1025 	int ret;
1026 
1027 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, 0,
1028 				    &req, sizeof(req),
1029 				    &resp, sizeof(resp),
1030 				    &xfer);
1031 	if (ret) {
1032 		ERROR("Message alloc failed (%d)\n", ret);
1033 		return ret;
1034 	}
1035 
1036 	req.dev_id = dev_id;
1037 	req.clk_id = clk_id;
1038 
1039 	ret = ti_sci_do_xfer(&xfer);
1040 	if (ret) {
1041 		ERROR("Transfer send failed (%d)\n", ret);
1042 		return ret;
1043 	}
1044 
1045 	*num_parents = resp.num_parents;
1046 
1047 	return 0;
1048 }
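
/*
 * Example (illustrative): querying how many parents a clock input has before
 * reprogramming its mux. How a parent_id value maps onto those parents is
 * defined by the TISCI specification; 0 below is only a placeholder.
 *
 *	uint8_t num_parents = 0U;
 *	int ret;
 *
 *	ret = ti_sci_clock_get_num_parents(dev_id, clk_id, &num_parents);
 *	if ((ret == 0) && (num_parents > 1U))
 *		ret = ti_sci_clock_set_parent(dev_id, clk_id, 0);
 */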
1049 
1050 /**
1051  * ti_sci_clock_get_match_freq() - Find a good match for frequency
1052  *
1053  * @dev_id:	Device identifier this request is for
1054  * @clk_id:	Clock identifier for the device for this request.
1055  *		Each device has its own set of clock inputs. This indexes
1056  *		which clock input to modify.
1057  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
1058  *		allowable programmed frequency and does not account for clock
1059  *		tolerances and jitter.
1060  * @target_freq: The target clock frequency in Hz. A frequency will be
1061  *		processed as close to this target frequency as possible.
1062  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
1063  *		allowable programmed frequency and does not account for clock
1064  *		tolerances and jitter.
1065  * @match_freq:	Matched frequency in Hz returned in the response.
1066  *
1067  * Return: 0 if all goes well, else appropriate error message
1068  */
1069 int ti_sci_clock_get_match_freq(uint32_t dev_id, uint8_t clk_id,
1070 				uint64_t min_freq, uint64_t target_freq,
1071 				uint64_t max_freq, uint64_t *match_freq)
1072 {
1073 	struct ti_sci_msg_req_query_clock_freq req;
1074 	struct ti_sci_msg_resp_query_clock_freq resp;
1075 
1076 	struct ti_sci_xfer xfer;
1077 	int ret;
1078 
1079 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_CLOCK_FREQ, 0,
1080 				    &req, sizeof(req),
1081 				    &resp, sizeof(resp),
1082 				    &xfer);
1083 	if (ret) {
1084 		ERROR("Message alloc failed (%d)\n", ret);
1085 		return ret;
1086 	}
1087 
1088 	req.dev_id = dev_id;
1089 	req.clk_id = clk_id;
1090 	req.min_freq_hz = min_freq;
1091 	req.target_freq_hz = target_freq;
1092 	req.max_freq_hz = max_freq;
1093 
1094 	ret = ti_sci_do_xfer(&xfer);
1095 	if (ret) {
1096 		ERROR("Transfer send failed (%d)\n", ret);
1097 		return ret;
1098 	}
1099 
1100 	*match_freq = resp.freq_hz;
1101 
1102 	return 0;
1103 }
1104 
1105 /**
1106  * ti_sci_clock_set_freq() - Set a frequency for clock
1107  *
1108  * @dev_id:	Device identifier this request is for
1109  * @clk_id:	Clock identifier for the device for this request.
1110  *		Each device has its own set of clock inputs. This indexes
1111  *		which clock input to modify.
1112  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
1113  *		allowable programmed frequency and does not account for clock
1114  *		tolerances and jitter.
1115  * @target_freq: The target clock frequency in Hz. A frequency will be
1116  *		processed as close to this target frequency as possible.
1117  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
1118  *		allowable programmed frequency and does not account for clock
1119  *		tolerances and jitter.
1120  *
1121  * Return: 0 if all goes well, else appropriate error message
1122  */
1123 int ti_sci_clock_set_freq(uint32_t dev_id, uint8_t clk_id, uint64_t min_freq,
1124 			  uint64_t target_freq, uint64_t max_freq)
1125 {
1126 	struct ti_sci_msg_req_set_clock_freq req;
1127 	struct ti_sci_msg_hdr resp;
1128 
1129 	struct ti_sci_xfer xfer;
1130 	int ret;
1131 
1132 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_FREQ, 0,
1133 				    &req, sizeof(req),
1134 				    &resp, sizeof(resp),
1135 				    &xfer);
1136 	if (ret) {
1137 		ERROR("Message alloc failed (%d)\n", ret);
1138 		return ret;
1139 	}
1140 	req.dev_id = dev_id;
1141 	req.clk_id = clk_id;
1142 	req.min_freq_hz = min_freq;
1143 	req.target_freq_hz = target_freq;
1144 	req.max_freq_hz = max_freq;
1145 
1146 	ret = ti_sci_do_xfer(&xfer);
1147 	if (ret) {
1148 		ERROR("Transfer send failed (%d)\n", ret);
1149 		return ret;
1150 	}
1151 
1152 	return 0;
1153 }
1154 
1155 /**
1156  * ti_sci_clock_get_freq() - Get current frequency
1157  *
1158  * @dev_id:	Device identifier this request is for
1159  * @clk_id:	Clock identifier for the device for this request.
1160  *		Each device has its own set of clock inputs. This indexes
1161  *		which clock input to modify.
1162  * @freq:	Current frequency in Hz
1163  *
1164  * Return: 0 if all goes well, else appropriate error message
1165  */
1166 int ti_sci_clock_get_freq(uint32_t dev_id, uint8_t clk_id, uint64_t *freq)
1167 {
1168 	struct ti_sci_msg_req_get_clock_freq req;
1169 	struct ti_sci_msg_resp_get_clock_freq resp;
1170 
1171 	struct ti_sci_xfer xfer;
1172 	int ret;
1173 
1174 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_FREQ, 0,
1175 				    &req, sizeof(req),
1176 				    &resp, sizeof(resp),
1177 				    &xfer);
1178 	if (ret) {
1179 		ERROR("Message alloc failed (%d)\n", ret);
1180 		return ret;
1181 	}
1182 
1183 	req.dev_id = dev_id;
1184 	req.clk_id = clk_id;
1185 
1186 	ret = ti_sci_do_xfer(&xfer);
1187 	if (ret) {
1188 		ERROR("Transfer send failed (%d)\n", ret);
1189 		return ret;
1190 	}
1191 
1192 	*freq = resp.freq_hz;
1193 
1194 	return 0;
1195 }
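
/*
 * Example (illustrative): requesting a frequency with a +/-10% window and
 * reading back what was actually programmed. The 100 MHz target is only a
 * placeholder.
 *
 *	uint64_t target = 100000000U;
 *	uint64_t actual = 0U;
 *	int ret;
 *
 *	ret = ti_sci_clock_set_freq(dev_id, clk_id,
 *				    target - (target / 10U),
 *				    target,
 *				    target + (target / 10U));
 *	if (ret == 0)
 *		ret = ti_sci_clock_get_freq(dev_id, clk_id, &actual);
 */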
1196 
1197 /**
1198  * ti_sci_core_reboot() - Command to request system reset
1199  *
1200  * Return: 0 if all goes well, else appropriate error message
1201  */
1202 int ti_sci_core_reboot(void)
1203 {
1204 	struct ti_sci_msg_req_reboot req;
1205 	struct ti_sci_msg_hdr resp;
1206 
1207 	struct ti_sci_xfer xfer;
1208 	int ret;
1209 
1210 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SYS_RESET, 0,
1211 				    &req, sizeof(req),
1212 				    &resp, sizeof(resp),
1213 				    &xfer);
1214 	if (ret) {
1215 		ERROR("Message alloc failed (%d)\n", ret);
1216 		return ret;
1217 	}
1218 	req.domain = TI_SCI_DOMAIN_FULL_SOC_RESET;
1219 
1220 	ret = ti_sci_do_xfer(&xfer);
1221 	if (ret) {
1222 		ERROR("Transfer send failed (%d)\n", ret);
1223 		return ret;
1224 	}
1225 
1226 	return 0;
1227 }
1228 
1229 /**
1230  * ti_sci_proc_request() - Request a physical processor control
1231  *
1232  * @proc_id:	Processor ID this request is for
1233  *
1234  * Return: 0 if all goes well, else appropriate error message
1235  */
1236 int ti_sci_proc_request(uint8_t proc_id)
1237 {
1238 	struct ti_sci_msg_req_proc_request req;
1239 	struct ti_sci_msg_hdr resp;
1240 
1241 	struct ti_sci_xfer xfer;
1242 	int ret;
1243 
1244 	ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_REQUEST, 0,
1245 				    &req, sizeof(req),
1246 				    &resp, sizeof(resp),
1247 				    &xfer);
1248 	if (ret) {
1249 		ERROR("Message alloc failed (%d)\n", ret);
1250 		return ret;
1251 	}
1252 
1253 	req.processor_id = proc_id;
1254 
1255 	ret = ti_sci_do_xfer(&xfer);
1256 	if (ret) {
1257 		ERROR("Transfer send failed (%d)\n", ret);
1258 		return ret;
1259 	}
1260 
1261 	return 0;
1262 }
1263 
1264 /**
1265  * ti_sci_proc_release() - Release a physical processor control
1266  *
1267  * @proc_id:	Processor ID this request is for
1268  *
1269  * Return: 0 if all goes well, else appropriate error message
1270  */
1271 int ti_sci_proc_release(uint8_t proc_id)
1272 {
1273 	struct ti_sci_msg_req_proc_release req;
1274 	struct ti_sci_msg_hdr resp;
1275 
1276 	struct ti_sci_xfer xfer;
1277 	int ret;
1278 
1279 	ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_RELEASE, 0,
1280 				    &req, sizeof(req),
1281 				    &resp, sizeof(resp),
1282 				    &xfer);
1283 	if (ret) {
1284 		ERROR("Message alloc failed (%d)\n", ret);
1285 		return ret;
1286 	}
1287 
1288 	req.processor_id = proc_id;
1289 
1290 	ret = ti_sci_do_xfer(&xfer);
1291 	if (ret) {
1292 		ERROR("Transfer send failed (%d)\n", ret);
1293 		return ret;
1294 	}
1295 
1296 	return 0;
1297 }
1298 
1299 /**
1300  * ti_sci_proc_handover() - Handover a physical processor control to a host in
1301  *                          the processor's access control list.
1302  *
1303  * @proc_id:	Processor ID this request is for
1304  * @host_id:	Host ID to get the control of the processor
1305  *
1306  * Return: 0 if all goes well, else appropriate error message
1307  */
1308 int ti_sci_proc_handover(uint8_t proc_id, uint8_t host_id)
1309 {
1310 	struct ti_sci_msg_req_proc_handover req;
1311 	struct ti_sci_msg_hdr resp;
1312 
1313 	struct ti_sci_xfer xfer;
1314 	int ret;
1315 
1316 	ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_HANDOVER, 0,
1317 				    &req, sizeof(req),
1318 				    &resp, sizeof(resp),
1319 				    &xfer);
1320 	if (ret) {
1321 		ERROR("Message alloc failed (%d)\n", ret);
1322 		return ret;
1323 	}
1324 
1325 	req.processor_id = proc_id;
1326 	req.host_id = host_id;
1327 
1328 	ret = ti_sci_do_xfer(&xfer);
1329 	if (ret) {
1330 		ERROR("Transfer send failed (%d)\n", ret);
1331 		return ret;
1332 	}
1333 
1334 	return 0;
1335 }
1336 
1337 /**
1338  * ti_sci_proc_set_boot_cfg() - Set the processor boot configuration flags
1339  *
1340  * @proc_id:		Processor ID this request is for
 * @bootvector:		Boot vector (start address) for the processor
1341  * @config_flags_set:	Configuration flags to be set
1342  * @config_flags_clear:	Configuration flags to be cleared
1343  *
1344  * Return: 0 if all goes well, else appropriate error message
1345  */
1346 int ti_sci_proc_set_boot_cfg(uint8_t proc_id, uint64_t bootvector,
1347 			     uint32_t config_flags_set,
1348 			     uint32_t config_flags_clear)
1349 {
1350 	struct ti_sci_msg_req_set_proc_boot_config req;
1351 	struct ti_sci_msg_hdr resp;
1352 
1353 	struct ti_sci_xfer xfer;
1354 	int ret;
1355 
1356 	ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CONFIG, 0,
1357 				    &req, sizeof(req),
1358 				    &resp, sizeof(resp),
1359 				    &xfer);
1360 	if (ret) {
1361 		ERROR("Message alloc failed (%d)\n", ret);
1362 		return ret;
1363 	}
1364 
1365 	req.processor_id = proc_id;
1366 	req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1367 	req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1368 				TISCI_ADDR_HIGH_SHIFT;
1369 	req.config_flags_set = config_flags_set;
1370 	req.config_flags_clear = config_flags_clear;
1371 
1372 	ret = ti_sci_do_xfer(&xfer);
1373 	if (ret) {
1374 		ERROR("Transfer send failed (%d)\n", ret);
1375 		return ret;
1376 	}
1377 
1378 	return 0;
1379 }
1380 
1381 /**
1382  * ti_sci_proc_set_boot_ctrl() - Set the processor boot control flags
1383  *
1384  * @proc_id:			Processor ID this request is for
1385  * @control_flags_set:		Control flags to be set
1386  * @control_flags_clear:	Control flags to be cleared
1387  *
1388  * Return: 0 if all goes well, else appropriate error message
1389  */
1390 int ti_sci_proc_set_boot_ctrl(uint8_t proc_id, uint32_t control_flags_set,
1391 			      uint32_t control_flags_clear)
1392 {
1393 	struct ti_sci_msg_req_set_proc_boot_ctrl req;
1394 	struct ti_sci_msg_hdr resp;
1395 
1396 	struct ti_sci_xfer xfer;
1397 	int ret;
1398 
1399 	ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL, 0,
1400 				    &req, sizeof(req),
1401 				    &resp, sizeof(resp),
1402 				    &xfer);
1403 	if (ret) {
1404 		ERROR("Message alloc failed (%d)\n", ret);
1405 		return ret;
1406 	}
1407 
1408 	req.processor_id = proc_id;
1409 	req.control_flags_set = control_flags_set;
1410 	req.control_flags_clear = control_flags_clear;
1411 
1412 	ret = ti_sci_do_xfer(&xfer);
1413 	if (ret) {
1414 		ERROR("Transfer send failed (%d)\n", ret);
1415 		return ret;
1416 	}
1417 
1418 	return 0;
1419 }
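
/*
 * Example (illustrative): a typical remote-core bring-up built from the
 * processor calls in this file. proc_id and entry_point are placeholders,
 * and the configuration/control flag masks (left at 0 here) are core
 * specific and come from ti_sci_protocol.h.
 *
 *	int ret;
 *
 *	ret = ti_sci_proc_request(proc_id);
 *	if (ret != 0)
 *		return ret;
 *
 *	ret = ti_sci_proc_set_boot_cfg(proc_id, entry_point, 0, 0);
 *	if (ret == 0)
 *		ret = ti_sci_proc_set_boot_ctrl(proc_id, 0, 0);
 *	if (ret == 0)
 *		ret = ti_sci_proc_release(proc_id);
 */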
1420 
1421 /**
1422  * ti_sci_proc_set_boot_ctrl_no_wait() - Set the processor boot control flags
1423  *					 without requesting or waiting for a
1424  *					 response.
1425  *
1426  * @proc_id:			Processor ID this request is for
1427  * @control_flags_set:		Control flags to be set
1428  * @control_flags_clear:	Control flags to be cleared
1429  *
1430  * Return: 0 if all goes well, else appropriate error message
1431  */
1432 int ti_sci_proc_set_boot_ctrl_no_wait(uint8_t proc_id,
1433 				      uint32_t control_flags_set,
1434 				      uint32_t control_flags_clear)
1435 {
1436 	struct ti_sci_msg_req_set_proc_boot_ctrl req;
1437 	struct ti_sci_xfer xfer;
1438 	int ret;
1439 
1440 	ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL, 0,
1441 				    &req, sizeof(req),
1442 				    NULL, 0,
1443 				    &xfer);
1444 	if (ret != 0U) {
1445 		ERROR("Message alloc failed (%d)\n", ret);
1446 		return ret;
1447 	}
1448 
1449 	req.processor_id = proc_id;
1450 	req.control_flags_set = control_flags_set;
1451 	req.control_flags_clear = control_flags_clear;
1452 
1453 	ret = ti_sci_do_xfer(&xfer);
1454 	if (ret != 0U) {
1455 		ERROR("Transfer send failed (%d)\n", ret);
1456 		return ret;
1457 	}
1458 
1459 	return 0;
1460 }
1461 
1462 /**
1463  * ti_sci_proc_auth_boot_image() - Authenticate and load image and then set the
1464  *                                 processor configuration flags
1465  *
1466  * @proc_id:	Processor ID this request is for
1467  * @cert_addr:	Memory address at which payload image certificate is located
1468  *
1469  * Return: 0 if all goes well, else appropriate error message
1470  */
1471 int ti_sci_proc_auth_boot_image(uint8_t proc_id, uint64_t cert_addr)
1472 {
1473 	struct ti_sci_msg_req_proc_auth_boot_image req;
1474 	struct ti_sci_msg_hdr resp;
1475 
1476 	struct ti_sci_xfer xfer;
1477 	int ret;
1478 
1479 	ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_AUTH_BOOT_IMAGE, 0,
1480 				    &req, sizeof(req),
1481 				    &resp, sizeof(resp),
1482 				    &xfer);
1483 	if (ret) {
1484 		ERROR("Message alloc failed (%d)\n", ret);
1485 		return ret;
1486 	}
1487 
1488 	req.processor_id = proc_id;
1489 	req.cert_addr_low = cert_addr & TISCI_ADDR_LOW_MASK;
1490 	req.cert_addr_high = (cert_addr & TISCI_ADDR_HIGH_MASK) >>
1491 				TISCI_ADDR_HIGH_SHIFT;
1492 
1493 	ret = ti_sci_do_xfer(&xfer);
1494 	if (ret) {
1495 		ERROR("Transfer send failed (%d)\n", ret);
1496 		return ret;
1497 	}
1498 
1499 	return 0;
1500 }
1501 
1502 /**
1503  * ti_sci_proc_get_boot_status() - Get the processor boot status
1504  *
1505  * @proc_id:	Processor ID this request is for
 * @bv:		Pointer to store the boot vector
 * @cfg_flags:	Pointer to store the configuration flags
 * @ctrl_flags:	Pointer to store the control flags
 * @sts_flags:	Pointer to store the status flags
1506  *
1507  * Return: 0 if all goes well, else appropriate error message
1508  */
1509 int ti_sci_proc_get_boot_status(uint8_t proc_id, uint64_t *bv,
1510 				uint32_t *cfg_flags,
1511 				uint32_t *ctrl_flags,
1512 				uint32_t *sts_flags)
1513 {
1514 	struct ti_sci_msg_req_get_proc_boot_status req;
1515 	struct ti_sci_msg_resp_get_proc_boot_status resp;
1516 
1517 	struct ti_sci_xfer xfer;
1518 	int ret;
1519 
1520 	ret = ti_sci_setup_one_xfer(TISCI_MSG_GET_PROC_BOOT_STATUS, 0,
1521 				    &req, sizeof(req),
1522 				    &resp, sizeof(resp),
1523 				    &xfer);
1524 	if (ret) {
1525 		ERROR("Message alloc failed (%d)\n", ret);
1526 		return ret;
1527 	}
1528 
1529 	req.processor_id = proc_id;
1530 
1531 	ret = ti_sci_do_xfer(&xfer);
1532 	if (ret) {
1533 		ERROR("Transfer send failed (%d)\n", ret);
1534 		return ret;
1535 	}
1536 
1537 	*bv = (resp.bootvector_low & TISCI_ADDR_LOW_MASK) |
1538 	      (((uint64_t)resp.bootvector_high << TISCI_ADDR_HIGH_SHIFT) &
1539 	       TISCI_ADDR_HIGH_MASK);
1540 	*cfg_flags = resp.config_flags;
1541 	*ctrl_flags = resp.control_flags;
1542 	*sts_flags = resp.status_flags;
1543 
1544 	return 0;
1545 }
1546 
1547 /**
1548  * ti_sci_proc_wait_boot_status() - Wait for a processor boot status
1549  *
1550  * @proc_id:			Processor ID this request is for
1551  * @num_wait_iterations:	Total number of iterations to check before
1552  *				timing out and giving up
1553  * @num_match_iterations:	Number of consecutive iterations for which the
1554  *				status must match, to account for status bits
1555  *				glitching. This ensures the match holds across
1556  *				consecutive checks; in the worst case the status
1557  *				must therefore stay stable for up to
1558  *				num_wait_iterations * num_match_iterations
1559  *				iterations to avoid a timeout.
1560  * @delay_per_iteration_us:	How long to wait (in microseconds) between
1561  *				status checks. This is the minimum duration;
1562  *				the overhead of register reads and checks comes
1563  *				on top of it and can vary with conditions.
1564  * @delay_before_iterations_us: How long to wait (in microseconds) before
1565  *				the very first check in the status check loop.
1566  *				This is the minimum duration; the overhead of
1567  *				register reads and checks comes on top of it.
1568  * @status_flags_1_set_all_wait: If non-zero, all status bits matching this
1569  *				field MUST be 1.
1570  * @status_flags_1_set_any_wait: If non-zero, at least one status bit
1571  *				matching this field MUST be 1.
1572  * @status_flags_1_clr_all_wait: If non-zero, all status bits matching this
1573  *				field MUST be 0.
1574  * @status_flags_1_clr_any_wait: If non-zero, at least one status bit
1575  *				matching this field MUST be 0.
1578  *
1579  * Return: 0 if all goes well, else appropriate error message
1580  */
1581 int ti_sci_proc_wait_boot_status(uint8_t proc_id, uint8_t num_wait_iterations,
1582 				 uint8_t num_match_iterations,
1583 				 uint8_t delay_per_iteration_us,
1584 				 uint8_t delay_before_iterations_us,
1585 				 uint32_t status_flags_1_set_all_wait,
1586 				 uint32_t status_flags_1_set_any_wait,
1587 				 uint32_t status_flags_1_clr_all_wait,
1588 				 uint32_t status_flags_1_clr_any_wait)
1589 {
1590 	struct ti_sci_msg_req_wait_proc_boot_status req;
1591 	struct ti_sci_msg_hdr resp;
1592 
1593 	struct ti_sci_xfer xfer;
1594 	int ret;
1595 
1596 	ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS, 0,
1597 				    &req, sizeof(req),
1598 				    &resp, sizeof(resp),
1599 				    &xfer);
1600 	if (ret) {
1601 		ERROR("Message alloc failed (%d)\n", ret);
1602 		return ret;
1603 	}
1604 
1605 	req.processor_id = proc_id;
1606 	req.num_wait_iterations = num_wait_iterations;
1607 	req.num_match_iterations = num_match_iterations;
1608 	req.delay_per_iteration_us = delay_per_iteration_us;
1609 	req.delay_before_iterations_us = delay_before_iterations_us;
1610 	req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
1611 	req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
1612 	req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
1613 	req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
1614 
1615 	ret = ti_sci_do_xfer(&xfer);
1616 	if (ret) {
1617 		ERROR("Transfer send failed (%d)\n", ret);
1618 		return ret;
1619 	}
1620 
1621 	return 0;
1622 }
1623 
1624 /**
1625  * ti_sci_proc_wait_boot_status_no_wait() - Wait for a processor boot status
1626  *					    without requesting or waiting for
1627  *					    a response.
1628  *
1629  * @proc_id:			Processor ID this request is for
1630  * @num_wait_iterations:	Total number of iterations to check before
1631  *				timing out and giving up
1632  * @num_match_iterations:	Number of consecutive iterations for which the
1633  *				status must match, to account for status bits
1634  *				glitching. This ensures the match holds across
1635  *				consecutive checks; in the worst case the status
1636  *				must therefore stay stable for up to
1637  *				num_wait_iterations * num_match_iterations
1638  *				iterations to avoid a timeout.
1639  * @delay_per_iteration_us:	How long to wait (in microseconds) between
1640  *				status checks. This is the minimum duration;
1641  *				the overhead of register reads and checks comes
1642  *				on top of it and can vary with conditions.
1643  * @delay_before_iterations_us: How long to wait (in microseconds) before
1644  *				the very first check in the status check loop.
1645  *				This is the minimum duration; the overhead of
1646  *				register reads and checks comes on top of it.
1647  * @status_flags_1_set_all_wait: If non-zero, all status bits matching this
1648  *				field MUST be 1.
1649  * @status_flags_1_set_any_wait: If non-zero, at least one status bit
1650  *				matching this field MUST be 1.
1651  * @status_flags_1_clr_all_wait: If non-zero, all status bits matching this
1652  *				field MUST be 0.
1653  * @status_flags_1_clr_any_wait: If non-zero, at least one status bit
1654  *				matching this field MUST be 0.
1657  *
1658  * Return: 0 if all goes well, else appropriate error message
1659  */
1660 int ti_sci_proc_wait_boot_status_no_wait(uint8_t proc_id,
1661 					 uint8_t num_wait_iterations,
1662 					 uint8_t num_match_iterations,
1663 					 uint8_t delay_per_iteration_us,
1664 					 uint8_t delay_before_iterations_us,
1665 					 uint32_t status_flags_1_set_all_wait,
1666 					 uint32_t status_flags_1_set_any_wait,
1667 					 uint32_t status_flags_1_clr_all_wait,
1668 					 uint32_t status_flags_1_clr_any_wait)
1669 {
1670 	struct ti_sci_msg_req_wait_proc_boot_status req;
1671 	struct ti_sci_xfer xfer;
1672 	int ret;
1673 
1674 	ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS, 0,
1675 				    &req, sizeof(req),
1676 				    NULL, 0,
1677 				    &xfer);
1678 	if (ret != 0U) {
1679 		ERROR("Message alloc failed (%d)\n", ret);
1680 		return ret;
1681 	}
1682 
1683 	req.processor_id = proc_id;
1684 	req.num_wait_iterations = num_wait_iterations;
1685 	req.num_match_iterations = num_match_iterations;
1686 	req.delay_per_iteration_us = delay_per_iteration_us;
1687 	req.delay_before_iterations_us = delay_before_iterations_us;
1688 	req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
1689 	req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
1690 	req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
1691 	req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
1692 
1693 	ret = ti_sci_do_xfer(&xfer);
1694 	if (ret != 0U) {
1695 		ERROR("Transfer send failed (%d)\n", ret);
1696 		return ret;
1697 	}
1698 
1699 	return 0;
1700 }
1701 
1702 /**
1703  * ti_sci_enter_sleep() - Command to initiate system transition into suspend.
1704  *
1705  * @proc_id: Processor ID.
1706  * @mode: Low power mode to enter.
1707  * @core_resume_addr: Address that core should be
1708  *		      resumed from after low power transition.
1709  *
1710  * Return: 0 if all goes well, else appropriate error message
1711  */
1712 int ti_sci_enter_sleep(uint8_t proc_id,
1713 		       uint8_t mode,
1714 		       uint64_t core_resume_addr)
1715 {
1716 	struct ti_sci_msg_req_enter_sleep req;
1717 	struct ti_sci_xfer xfer;
1718 	int ret;
1719 
1720 	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_ENTER_SLEEP, 0,
1721 				    &req, sizeof(req),
1722 				    NULL, 0,
1723 				    &xfer);
1724 	if (ret != 0U) {
1725 		ERROR("Message alloc failed (%d)\n", ret);
1726 		return ret;
1727 	}
1728 
1729 	req.processor_id = proc_id;
1730 	req.mode = mode;
1731 	req.core_resume_lo = core_resume_addr & TISCI_ADDR_LOW_MASK;
1732 	req.core_resume_hi = (core_resume_addr & TISCI_ADDR_HIGH_MASK) >>
1733 			     TISCI_ADDR_HIGH_SHIFT;
1734 
1735 	ret = ti_sci_do_xfer(&xfer);
1736 	if (ret != 0U) {
1737 		ERROR("Transfer send failed (%d)\n", ret);
1738 		return ret;
1739 	}
1740 
1741 	return 0;
1742 }
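
/*
 * Example (illustrative): requesting suspend from a platform power
 * management path. proc_id, sleep_mode and resume_addr are placeholders;
 * the mode constant comes from ti_sci_protocol.h and resume_addr is the
 * platform's warm-boot entry point.
 *
 *	int ret;
 *
 *	ret = ti_sci_enter_sleep(proc_id, sleep_mode, resume_addr);
 *	if (ret != 0)
 *		ERROR("Sleep entry failed (%d)\n", ret);
 */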
1743