// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
#include "ivpu_trace.h"

#define IPC_MAX_RX_MSG	128

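/*
 * A single TX slot carved out of the mem_tx pool: the IPC header is
 * immediately followed by the JSM message it points at, so one pool
 * allocation covers both.
 */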
struct ivpu_ipc_tx_buf {
	struct ivpu_ipc_hdr ipc;
	struct vpu_jsm_msg jsm;
};

static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
			      struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
{
	ivpu_dbg(vdev, IPC,
		 "%s: vpu:0x%x (data_addr:0x%08x, data_size:0x%x, channel:0x%x, src_node:0x%x, dst_node:0x%x, status:0x%x)\n",
		 c, vpu_addr, ipc_hdr->data_addr, ipc_hdr->data_size, ipc_hdr->channel,
		 ipc_hdr->src_node, ipc_hdr->dst_node, ipc_hdr->status);
}

static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c,
			      struct vpu_jsm_msg *jsm_msg, u32 vpu_addr)
{
	u32 *payload = (u32 *)&jsm_msg->payload;

	ivpu_dbg(vdev, JSM,
		 "%s: vpu:0x%08x (type:%s, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
		 c, vpu_addr, ivpu_jsm_msg_type_to_str(jsm_msg->type),
		 jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
		 payload[0], payload[1], payload[2], payload[3], payload[4]);
}

static void
ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
		      struct vpu_jsm_msg *jsm_msg)
{
	ipc_hdr->status = IVPU_IPC_HDR_FREE;
	if (jsm_msg)
		jsm_msg->status = VPU_JSM_MSG_FREE;
	wmb(); /* Flush WC buffers for message statuses */
}

static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	ivpu_bo_free(ipc->mem_rx);
	ivpu_bo_free(ipc->mem_tx);
}

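/*
 * Reserve a TX slot from the mm_tx gen pool, warn if the firmware has not
 * released the previous occupant, then fill in the IPC header and copy the
 * JSM request into the slot under a fresh request_id. The WC mapping is
 * flushed before the slot address is handed to the hardware.
 */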
static int
ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_tx_buf *tx_buf;
	u32 tx_buf_vpu_addr;
	u32 jsm_vpu_addr;

	tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
	if (!tx_buf_vpu_addr) {
		ivpu_err_ratelimited(vdev, "Failed to reserve IPC buffer, size %zu\n",
				     sizeof(*tx_buf));
		return -ENOMEM;
	}

	tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
	if (drm_WARN_ON(&vdev->drm, !tx_buf)) {
		gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf));
		return -EIO;
	}

	jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);

	if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
		ivpu_warn_ratelimited(vdev, "IPC message vpu:0x%x not released by firmware\n",
				      tx_buf_vpu_addr);

	if (tx_buf->jsm.status != VPU_JSM_MSG_FREE)
		ivpu_warn_ratelimited(vdev, "JSM message vpu:0x%x not released by firmware\n",
				      jsm_vpu_addr);

	memset(tx_buf, 0, sizeof(*tx_buf));
	tx_buf->ipc.data_addr = jsm_vpu_addr;
	/* TODO: Set data_size to actual JSM message size, not union of all messages */
	tx_buf->ipc.data_size = sizeof(*req);
	tx_buf->ipc.channel = cons->channel;
	tx_buf->ipc.src_node = 0;
	tx_buf->ipc.dst_node = 1;
	tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;
	tx_buf->jsm.type = req->type;
	tx_buf->jsm.status = VPU_JSM_MSG_ALLOCATED;
	tx_buf->jsm.payload = req->payload;

	req->request_id = atomic_inc_return(&ipc->request_id);
	tx_buf->jsm.request_id = req->request_id;
	cons->request_id = req->request_id;
	wmb(); /* Flush WC buffers for IPC, JSM msgs */

	cons->tx_vpu_addr = tx_buf_vpu_addr;

	ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr);
	ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr);

	return 0;
}

static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	if (vpu_addr)
		gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf));
}

static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
{
	ivpu_hw_ipc_tx_set(vdev, vpu_addr);
}

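/*
 * Called from the IRQ handler with cons_lock held: wrap the received buffers
 * in an ivpu_ipc_rx_msg and route it. Messages for callback consumers are
 * queued on ipc->cb_msg_list for the IRQ thread; messages for synchronous
 * consumers go on the consumer's rx_msg_list and wake any waiter. On
 * allocation failure the buffers are handed straight back to the firmware.
 */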
static void
ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg;

	lockdep_assert_held(&ipc->cons_lock);
	lockdep_assert_irqs_disabled();

	rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
	if (!rx_msg) {
		ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		return;
	}

	atomic_inc(&ipc->rx_msg_count);

	rx_msg->ipc_hdr = ipc_hdr;
	rx_msg->jsm_msg = jsm_msg;
	rx_msg->callback = cons->rx_callback;

	if (rx_msg->callback) {
		list_add_tail(&rx_msg->link, &ipc->cb_msg_list);
	} else {
		spin_lock(&cons->rx_lock);
		list_add_tail(&rx_msg->link, &cons->rx_msg_list);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
}

static void
ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg)
{
	list_del(&rx_msg->link);
	ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
	atomic_dec(&vdev->ipc->rx_msg_count);
	kfree(rx_msg);
}

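/*
 * Register a consumer for the given IPC channel. With a non-NULL rx_callback
 * the consumer is asynchronous: matching messages are delivered via the
 * callback from the IRQ thread. With a NULL callback the consumer is
 * synchronous and must collect replies with ivpu_ipc_receive().
 */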
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			   u32 channel, ivpu_ipc_rx_callback_t rx_callback)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	INIT_LIST_HEAD(&cons->link);
	cons->channel = channel;
	cons->tx_vpu_addr = 0;
	cons->request_id = 0;
	cons->aborted = false;
	cons->rx_callback = rx_callback;
	spin_lock_init(&cons->rx_lock);
	INIT_LIST_HEAD(&cons->rx_msg_list);
	init_waitqueue_head(&cons->rx_msg_wq);

	spin_lock_irq(&ipc->cons_lock);
	list_add_tail(&cons->link, &ipc->cons_list);
	spin_unlock_irq(&ipc->cons_lock);
}

void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	spin_lock_irq(&ipc->cons_lock);
	list_del(&cons->link);
	spin_unlock_irq(&ipc->cons_lock);

	spin_lock_irq(&cons->rx_lock);
	list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);

	ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}

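/*
 * Queue a JSM request on the consumer's channel. Fails with -EAGAIN while
 * IPC is disabled, so callers may retry.
 */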
int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	mutex_lock(&ipc->lock);

	if (!ipc->on) {
		ret = -EAGAIN;
		goto unlock;
	}

	ret = ivpu_ipc_tx_prepare(vdev, cons, req);
	if (ret)
		goto unlock;

	ivpu_ipc_tx(vdev, cons->tx_vpu_addr);
	trace_jsm("[tx]", req);

unlock:
	mutex_unlock(&ipc->lock);
	return ret;
}

static bool ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
{
	bool ret;

	spin_lock_irq(&cons->rx_lock);
	ret = !list_empty(&cons->rx_msg_list) || cons->aborted;
	spin_unlock_irq(&cons->rx_lock);

	return ret;
}

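/*
 * Wait up to timeout_ms for a reply on a synchronous consumer and copy it
 * into the caller's buffers. Returns -ETIMEDOUT when nothing arrives,
 * -ECANCELED when the consumer was aborted (see ivpu_ipc_disable()) and
 * -EBADMSG when the firmware reports a non-success result in the reply.
 */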
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		     struct ivpu_ipc_hdr *ipc_buf,
		     struct vpu_jsm_msg *jsm_msg, unsigned long timeout_ms)
{
	struct ivpu_ipc_rx_msg *rx_msg;
	int wait_ret, ret = 0;

	if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n"))
		return -EINVAL;

	wait_ret = wait_event_timeout(cons->rx_msg_wq,
				      ivpu_ipc_rx_need_wakeup(cons),
				      msecs_to_jiffies(timeout_ms));

	if (wait_ret == 0)
		return -ETIMEDOUT;

	spin_lock_irq(&cons->rx_lock);
	if (cons->aborted) {
		spin_unlock_irq(&cons->rx_lock);
		return -ECANCELED;
	}
	rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
	if (!rx_msg) {
		spin_unlock_irq(&cons->rx_lock);
		return -EAGAIN;
	}

	if (ipc_buf)
		memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
	if (rx_msg->jsm_msg) {
		u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));

		if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
			ivpu_err(vdev, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
			ret = -EBADMSG;
		}

		if (jsm_msg)
			memcpy(jsm_msg, rx_msg->jsm_msg, size);
		trace_jsm("[rx]", rx_msg->jsm_msg);
	}

	ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);
	return ret;
}

int
ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			       enum vpu_ipc_msg_type expected_resp_type,
			       struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms)
{
	struct ivpu_ipc_consumer cons;
	int ret;

	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) &&
		    pm_runtime_enabled(vdev->drm.dev));

	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);

	ret = ivpu_ipc_send(vdev, &cons, req);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
		goto consumer_del;
	}

	ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC receive failed: type %s, ret %d\n",
				      ivpu_jsm_msg_type_to_str(req->type), ret);
		goto consumer_del;
	}

	if (resp->type != expected_resp_type) {
		ivpu_warn_ratelimited(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
		ret = -EBADE;
	}

consumer_del:
	ivpu_ipc_consumer_del(vdev, &cons);
	return ret;
}

int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
			  u32 channel, unsigned long timeout_ms)
{
	struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg hb_resp;
	int ret, hb_ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
	if (ret != -ETIMEDOUT)
		goto rpm_put;

	hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
						&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
						vdev->timeout.jsm);
	if (hb_ret == -ETIMEDOUT)
		ivpu_pm_trigger_recovery(vdev, "IPC timeout");

rpm_put:
	ivpu_rpm_put(vdev);
	return ret;
}

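/*
 * Fire-and-forget: send the request and sleep for timeout_ms without
 * consuming a response. A reply that arrives in the meantime is queued on
 * the temporary consumer and freed by ivpu_ipc_consumer_del().
 */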
int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			   u32 channel, unsigned long timeout_ms)
{
	struct ivpu_ipc_consumer cons;
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);

	ret = ivpu_ipc_send(vdev, &cons, req);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
		goto consumer_del;
	}

	msleep(timeout_ms);

consumer_del:
	ivpu_ipc_consumer_del(vdev, &cons);
	ivpu_rpm_put(vdev);
	return ret;
}

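/*
 * A consumer matches on channel alone for header-only messages (e.g. boot
 * notifications); when a JSM payload is present, the reply's request_id must
 * also match the consumer's last request.
 */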
static bool
ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	if (cons->channel != ipc_hdr->channel)
		return false;

	if (!jsm_msg || jsm_msg->request_id == cons->request_id)
		return true;

	return false;
}

void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons;
	struct ivpu_ipc_hdr *ipc_hdr;
	struct vpu_jsm_msg *jsm_msg;
	unsigned long flags;
	bool dispatched;
	u32 vpu_addr;

	/*
	 * The driver must purge all messages from the IPC FIFO to clear the
	 * IPC interrupt; unless the FIFO is drained to zero, no further IPC
	 * interrupts will be generated.
	 */
	while (ivpu_hw_ipc_rx_count_get(vdev)) {
		vpu_addr = ivpu_hw_ipc_rx_addr_get(vdev);
		if (vpu_addr == REG_IO_ERROR) {
			ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
			return;
		}

		ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
		if (!ipc_hdr) {
			ivpu_warn_ratelimited(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
			continue;
		}
		ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr);

		jsm_msg = NULL;
		if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) {
			jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
			if (!jsm_msg) {
				ivpu_warn_ratelimited(vdev, "JSM msg 0x%x out of range\n",
						      ipc_hdr->data_addr);
				ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL);
				continue;
			}
			ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr);
		}

		if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
			ivpu_warn_ratelimited(vdev, "IPC RX msg dropped, msg count %d\n",
					      IPC_MAX_RX_MSG);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
			continue;
		}

		dispatched = false;
		spin_lock_irqsave(&ipc->cons_lock, flags);
		list_for_each_entry(cons, &ipc->cons_list, link) {
			if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
				ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg);
				dispatched = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ipc->cons_lock, flags);

		if (!dispatched) {
			ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		}
	}

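	/*
	 * Callback messages are handled in the IRQ thread; kick it via the
	 * IRQ source FIFO only when something was actually queued.
	 */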
	if (!list_empty(&ipc->cb_msg_list))
		if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC))
			ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
}

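/*
 * IRQ thread: splice the pending callback messages off cb_msg_list under
 * cons_lock, then run the callbacks with interrupts enabled and release each
 * message back to the firmware.
 */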
void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;
	struct list_head cb_msg_list;

	INIT_LIST_HEAD(&cb_msg_list);

	spin_lock_irq(&ipc->cons_lock);
	list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list);
	spin_unlock_irq(&ipc->cons_lock);

	list_for_each_entry_safe(rx_msg, r, &cb_msg_list, link) {
		rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	}
}

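/*
 * Allocate the 16K write-combined TX and RX rings and put the TX ring under
 * a gen_pool allocator so ivpu_ipc_tx_prepare() can carve aligned slots out
 * of it (__ffs() turns IVPU_IPC_ALIGNMENT into the pool's minimum allocation
 * order).
 */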
int ivpu_ipc_init(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	ipc->mem_tx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!ipc->mem_tx) {
		ivpu_err(vdev, "Failed to allocate mem_tx\n");
		return -ENOMEM;
	}

	ipc->mem_rx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!ipc->mem_rx) {
		ivpu_err(vdev, "Failed to allocate mem_rx\n");
		ret = -ENOMEM;
		goto err_free_tx;
	}

	ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
					  -1, "TX_IPC_JSM");
	if (IS_ERR(ipc->mm_tx)) {
		ret = PTR_ERR(ipc->mm_tx);
		ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
		goto err_free_rx;
	}

	ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ivpu_bo_size(ipc->mem_tx), -1);
	if (ret) {
		ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);
		goto err_free_rx;
	}

	spin_lock_init(&ipc->cons_lock);
	INIT_LIST_HEAD(&ipc->cons_list);
	INIT_LIST_HEAD(&ipc->cb_msg_list);
	ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
		goto err_free_rx;
	}
	ivpu_ipc_reset(vdev);
	return 0;

err_free_rx:
	ivpu_bo_free(ipc->mem_rx);
err_free_tx:
	ivpu_bo_free(ipc->mem_tx);
	return ret;
}

void ivpu_ipc_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);

	ivpu_ipc_mem_fini(vdev);
}

void ivpu_ipc_enable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	ipc->on = true;
	mutex_unlock(&ipc->lock);
}

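/*
 * Stop accepting new sends, then abort every synchronous consumer so that
 * waiters in ivpu_ipc_receive() return -ECANCELED, and drop any replies
 * still queued on them.
 */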
void ivpu_ipc_disable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons, *c;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));

	mutex_lock(&ipc->lock);
	ipc->on = false;
	mutex_unlock(&ipc->lock);

	spin_lock_irq(&ipc->cons_lock);
	list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
		spin_lock(&cons->rx_lock);
		if (!cons->rx_callback)
			cons->aborted = true;
		list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
			ivpu_ipc_rx_msg_del(vdev, rx_msg);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
	spin_unlock_irq(&ipc->cons_lock);

	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
}

void ivpu_ipc_reset(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	drm_WARN_ON(&vdev->drm, ipc->on);

	memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
	memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
	wmb(); /* Flush WC buffers for TX and RX rings */

	mutex_unlock(&ipc->lock);
}