Lines Matching full:vdev (drivers/accel/ivpu/ivpu_ipc.c)
27 static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c, in ivpu_ipc_msg_dump() argument
30 ivpu_dbg(vdev, IPC, in ivpu_ipc_msg_dump()
36 static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c, in ivpu_jsm_msg_dump() argument
41 ivpu_dbg(vdev, JSM, in ivpu_jsm_msg_dump()
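The two dump helpers above differ only in which header they decode before logging through ivpu_dbg(). A minimal sketch of the IPC variant, assuming struct ivpu_ipc_hdr exposes at least the data_addr field seen later in this listing (other fields and the exact format string are assumptions):

/* Hedged sketch of ivpu_ipc_msg_dump(); only data_addr is confirmed by this listing. */
static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
			      struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
{
	ivpu_dbg(vdev, IPC, "%s: vpu:0x%08x, data_addr:0x%08x\n",
		 c, vpu_addr, ipc_hdr->data_addr);
}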
49 ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr, in ivpu_ipc_rx_mark_free() argument
58 static void ivpu_ipc_mem_fini(struct ivpu_device *vdev) in ivpu_ipc_mem_fini() argument
60 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_mem_fini()
67 ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, in ivpu_ipc_tx_prepare() argument
70 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_tx_prepare()
77 ivpu_err_ratelimited(vdev, "Failed to reserve IPC buffer, size %ld\n", in ivpu_ipc_tx_prepare()
83 if (drm_WARN_ON(&vdev->drm, !tx_buf)) { in ivpu_ipc_tx_prepare()
91 ivpu_warn_ratelimited(vdev, "IPC message vpu:0x%x not released by firmware\n", in ivpu_ipc_tx_prepare()
95 ivpu_warn_ratelimited(vdev, "JSM message vpu:0x%x not released by firmware\n", in ivpu_ipc_tx_prepare()
117 ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr); in ivpu_ipc_tx_prepare()
118 ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr); in ivpu_ipc_tx_prepare()
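The tx_buf accesses above (tx_buf->ipc and tx_buf->jsm) imply that each TX slot reserved from the genpool carries an IPC transport header together with the JSM payload. A sketch of that assumed layout (member order, alignment, and padding are assumptions):

/* Assumed TX slot layout, inferred from tx_buf->ipc / tx_buf->jsm above. */
struct ivpu_ipc_tx_buf {
	struct ivpu_ipc_hdr ipc;   /* transport header, dumped via ivpu_ipc_msg_dump() */
	struct vpu_jsm_msg jsm;    /* JSM command, dumped via ivpu_jsm_msg_dump() */
};

Presumably jsm_vpu_addr is then tx_buf_vpu_addr plus the offset of the jsm member, which is why both addresses are logged separately above.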
123 static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr) in ivpu_ipc_tx_release() argument
125 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_tx_release()
131 static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr) in ivpu_ipc_tx() argument
133 ivpu_hw_ipc_tx_set(vdev, vpu_addr); in ivpu_ipc_tx()
137 ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, in ivpu_ipc_rx_msg_add() argument
140 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_rx_msg_add()
148 ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg); in ivpu_ipc_rx_msg_add()
169 ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg) in ivpu_ipc_rx_msg_del() argument
172 ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); in ivpu_ipc_rx_msg_del()
173 atomic_dec(&vdev->ipc->rx_msg_count); in ivpu_ipc_rx_msg_del()
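ivpu_ipc_rx_msg_add()/_del() maintain the per-consumer bookkeeping for received messages and keep rx_msg_count in step. The fields touched elsewhere in this listing (rx_msg->ipc_hdr, rx_msg->jsm_msg, rx_msg->callback) suggest roughly the following node structure; list membership and field order are assumptions:

/* Assumed RX bookkeeping node; only the referenced fields are confirmed here. */
struct ivpu_ipc_rx_msg {
	struct list_head link;           /* queued on the consumer (assumption) */
	struct ivpu_ipc_hdr *ipc_hdr;    /* transport header, released in _del() */
	struct vpu_jsm_msg *jsm_msg;     /* decoded JSM response, released in _del() */
	void (*callback)(struct ivpu_device *vdev,
			 struct ivpu_ipc_hdr *ipc_hdr,
			 struct vpu_jsm_msg *jsm_msg); /* signature inferred from the irq thread handler call below */
};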
177 void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, in ivpu_ipc_consumer_add() argument
180 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_consumer_add()
197 void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons) in ivpu_ipc_consumer_del() argument
199 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_consumer_del()
208 ivpu_ipc_rx_msg_del(vdev, rx_msg); in ivpu_ipc_consumer_del()
211 ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr); in ivpu_ipc_consumer_del()
214 int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req) in ivpu_ipc_send() argument
216 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_send()
226 ret = ivpu_ipc_tx_prepare(vdev, cons, req); in ivpu_ipc_send()
230 ivpu_ipc_tx(vdev, cons->tx_vpu_addr); in ivpu_ipc_send()
249 int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, in ivpu_ipc_receive() argument
256 if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n")) in ivpu_ipc_receive()
283 ivpu_err(vdev, "IPC resp result error: %d\n", rx_msg->jsm_msg->result); in ivpu_ipc_receive()
292 ivpu_ipc_rx_msg_del(vdev, rx_msg); in ivpu_ipc_receive()
298 ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req, in ivpu_ipc_send_receive_internal() argument
305 drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) && in ivpu_ipc_send_receive_internal()
306 pm_runtime_enabled(vdev->drm.dev)); in ivpu_ipc_send_receive_internal()
308 ivpu_ipc_consumer_add(vdev, &cons, channel, NULL); in ivpu_ipc_send_receive_internal()
310 ret = ivpu_ipc_send(vdev, &cons, req); in ivpu_ipc_send_receive_internal()
312 ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret); in ivpu_ipc_send_receive_internal()
316 ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms); in ivpu_ipc_send_receive_internal()
318 ivpu_warn_ratelimited(vdev, "IPC receive failed: type %s, ret %d\n", in ivpu_ipc_send_receive_internal()
324 ivpu_warn_ratelimited(vdev, "Invalid JSM response type: 0x%x\n", resp->type); in ivpu_ipc_send_receive_internal()
329 ivpu_ipc_consumer_del(vdev, &cons); in ivpu_ipc_send_receive_internal()
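Pieced together from the fragments above, ivpu_ipc_send_receive_internal() is the synchronous request/response path: register a blocking (NULL-callback) consumer on the channel, send, wait for the response with a timeout, and validate the response type. A hedged reconstruction of that flow; parameter types not visible here, the type-to-string helper, local names, and the error codes are assumptions:

/* Sketch of the synchronous flow; details not visible in this listing are assumptions. */
static int ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
					  enum vpu_ipc_msg_type expected_resp,
					  struct vpu_jsm_msg *resp, u32 channel,
					  unsigned long timeout_ms)
{
	struct ivpu_ipc_consumer cons;
	int ret;

	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) &&
		    pm_runtime_enabled(vdev->drm.dev));

	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);  /* NULL callback => blocking mode */

	ret = ivpu_ipc_send(vdev, &cons, req);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
		goto consumer_del;
	}

	ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC receive failed: type %s, ret %d\n",
				      ivpu_jsm_msg_type_to_str(req->type), ret); /* helper name is an assumption */
		goto consumer_del;
	}

	if (resp->type != expected_resp) {
		ivpu_warn_ratelimited(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
		ret = -EBADMSG;  /* error code is an assumption */
	}

consumer_del:
	ivpu_ipc_consumer_del(vdev, &cons);
	return ret;
}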
333 int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req, in ivpu_ipc_send_receive() argument
341 ret = ivpu_rpm_get(vdev); in ivpu_ipc_send_receive()
345 ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms); in ivpu_ipc_send_receive()
349 hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, in ivpu_ipc_send_receive()
351 vdev->timeout.jsm); in ivpu_ipc_send_receive()
353 ivpu_pm_trigger_recovery(vdev, "IPC timeout"); in ivpu_ipc_send_receive()
356 ivpu_rpm_put(vdev); in ivpu_ipc_send_receive()
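ivpu_ipc_send_receive() is the wrapper most callers are expected to use: it takes a runtime-PM reference, runs the internal send/receive, and on timeout issues an engine-heartbeat query before triggering recovery. A hedged sketch of how a caller might issue a JSM command through it; the channel constant, payload field names, and the example function itself are assumptions for illustration:

/* Illustrative caller sketch; payload fields and channel are assumptions. */
static int ivpu_jsm_query_engine_hb_example(struct ivpu_device *vdev, u32 engine, u64 *hb)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		return ret;

	*hb = resp.payload.query_engine_hb_done.heartbeat;
	return 0;
}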
360 int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req, in ivpu_ipc_send_and_wait() argument
366 ret = ivpu_rpm_get(vdev); in ivpu_ipc_send_and_wait()
370 ivpu_ipc_consumer_add(vdev, &cons, channel, NULL); in ivpu_ipc_send_and_wait()
372 ret = ivpu_ipc_send(vdev, &cons, req); in ivpu_ipc_send_and_wait()
374 ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret); in ivpu_ipc_send_and_wait()
381 ivpu_ipc_consumer_del(vdev, &cons); in ivpu_ipc_send_and_wait()
382 ivpu_rpm_put(vdev); in ivpu_ipc_send_and_wait()
387 ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, in ivpu_ipc_match_consumer() argument
399 void ivpu_ipc_irq_handler(struct ivpu_device *vdev) in ivpu_ipc_irq_handler() argument
401 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_irq_handler()
413 while (ivpu_hw_ipc_rx_count_get(vdev)) { in ivpu_ipc_irq_handler()
414 vpu_addr = ivpu_hw_ipc_rx_addr_get(vdev); in ivpu_ipc_irq_handler()
416 ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n"); in ivpu_ipc_irq_handler()
422 ivpu_warn_ratelimited(vdev, "IPC msg 0x%x out of range\n", vpu_addr); in ivpu_ipc_irq_handler()
425 ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr); in ivpu_ipc_irq_handler()
431 ivpu_warn_ratelimited(vdev, "JSM msg 0x%x out of range\n", in ivpu_ipc_irq_handler()
433 ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL); in ivpu_ipc_irq_handler()
436 ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr); in ivpu_ipc_irq_handler()
440 ivpu_warn_ratelimited(vdev, "IPC RX msg dropped, msg count %d\n", in ivpu_ipc_irq_handler()
442 ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg); in ivpu_ipc_irq_handler()
449 if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) { in ivpu_ipc_irq_handler()
450 ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg); in ivpu_ipc_irq_handler()
458 ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr); in ivpu_ipc_irq_handler()
459 ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg); in ivpu_ipc_irq_handler()
464 if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC)) in ivpu_ipc_irq_handler()
465 ivpu_err_ratelimited(vdev, "IRQ FIFO full\n"); in ivpu_ipc_irq_handler()
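The hard-IRQ handler above only validates the incoming addresses, queues messages onto matching consumers, and then pushes IVPU_HW_IRQ_SRC_IPC into the device IRQ FIFO; callback work is deferred. A minimal sketch of how the threaded handler is assumed to drain that FIFO and dispatch to ivpu_ipc_irq_thread_handler(); the surrounding handler name, the enum type name, and other IRQ sources are assumptions:

/* Hedged sketch of the threaded-IRQ dispatch; only the IPC source is confirmed here. */
static irqreturn_t ivpu_irq_thread_handler_sketch(int irq, void *arg)
{
	struct ivpu_device *vdev = arg;
	enum ivpu_hw_irq_src irq_src;  /* element type of the kfifo is an assumption */

	while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
		switch (irq_src) {
		case IVPU_HW_IRQ_SRC_IPC:
			ivpu_ipc_irq_thread_handler(vdev);  /* runs rx callbacks, see below */
			break;
		default:
			ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
			break;
		}
	}

	return IRQ_HANDLED;
}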
468 void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev) in ivpu_ipc_irq_thread_handler() argument
470 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_irq_thread_handler()
481 rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); in ivpu_ipc_irq_thread_handler()
482 ivpu_ipc_rx_msg_del(vdev, rx_msg); in ivpu_ipc_irq_thread_handler()
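ivpu_ipc_irq_thread_handler() walks the deferred list, invokes each consumer's rx callback, and frees the message. Consumers that want this asynchronous delivery register a non-NULL callback; a caller sketch with a hypothetical callback and helper (signature inferred from the rx_msg->callback(vdev, ipc_hdr, jsm_msg) call above):

/* Hypothetical async consumer; names are illustrative, signature is inferred. */
static void my_async_rx_callback(struct ivpu_device *vdev,
				 struct ivpu_ipc_hdr *ipc_hdr,
				 struct vpu_jsm_msg *jsm_msg)
{
	ivpu_dbg(vdev, IPC, "Async JSM message type 0x%x\n", jsm_msg->type);
}

static void my_register_async_consumer(struct ivpu_device *vdev,
				       struct ivpu_ipc_consumer *cons, u32 channel)
{
	/* A non-NULL callback selects async mode; ivpu_ipc_receive() then refuses to
	 * block for this consumer (see the "Consumer works only in async mode" warning). */
	ivpu_ipc_consumer_add(vdev, cons, channel, my_async_rx_callback);
}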
486 int ivpu_ipc_init(struct ivpu_device *vdev) in ivpu_ipc_init() argument
488 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_init()
491 ipc->mem_tx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); in ivpu_ipc_init()
493 ivpu_err(vdev, "Failed to allocate mem_tx\n"); in ivpu_ipc_init()
497 ipc->mem_rx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); in ivpu_ipc_init()
499 ivpu_err(vdev, "Failed to allocate mem_rx\n"); in ivpu_ipc_init()
504 ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT), in ivpu_ipc_init()
508 ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx); in ivpu_ipc_init()
514 ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret); in ivpu_ipc_init()
521 ret = drmm_mutex_init(&vdev->drm, &ipc->lock); in ivpu_ipc_init()
523 ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret); in ivpu_ipc_init()
526 ivpu_ipc_reset(vdev); in ivpu_ipc_init()
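ivpu_ipc_init() allocates two 16 KiB mappable buffers (mem_tx and mem_rx) and carves TX slots out of mem_tx with a genpool aligned to IVPU_IPC_ALIGNMENT. The gen_pool_add() call truncated above presumably registers the VPU address range of mem_tx; a hedged sketch of that step, with the buffer address and size helpers as assumptions:

/* Sketch: register mem_tx's VPU address range with the TX genpool (helpers assumed). */
static int ivpu_ipc_mem_tx_pool_add_sketch(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ivpu_bo_size(ipc->mem_tx), -1);
	if (ret)
		ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);

	return ret;
}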
536 void ivpu_ipc_fini(struct ivpu_device *vdev) in ivpu_ipc_fini() argument
538 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_fini()
540 drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list)); in ivpu_ipc_fini()
541 drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list)); in ivpu_ipc_fini()
542 drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0); in ivpu_ipc_fini()
544 ivpu_ipc_mem_fini(vdev); in ivpu_ipc_fini()
547 void ivpu_ipc_enable(struct ivpu_device *vdev) in ivpu_ipc_enable() argument
549 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_enable()
556 void ivpu_ipc_disable(struct ivpu_device *vdev) in ivpu_ipc_disable() argument
558 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_disable()
562 drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list)); in ivpu_ipc_disable()
574 ivpu_ipc_rx_msg_del(vdev, rx_msg); in ivpu_ipc_disable()
580 drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0); in ivpu_ipc_disable()
583 void ivpu_ipc_reset(struct ivpu_device *vdev) in ivpu_ipc_reset() argument
585 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_reset()
588 drm_WARN_ON(&vdev->drm, ipc->on); in ivpu_ipc_reset()