Lines Matching +full:rp1 +full:- +full:cfe
1 // SPDX-License-Identifier: GPL-2.0-only
3 * RP1 Camera Front End Driver
5 * Copyright (c) 2021-2024 Raspberry Pi Ltd.
6 * Copyright (c) 2023-2024 Ideas on Board Oy
13 #include <linux/dma-mapping.h>
30 #include <media/v4l2-async.h>
31 #include <media/v4l2-common.h>
32 #include <media/v4l2-ctrls.h>
33 #include <media/v4l2-dev.h>
34 #include <media/v4l2-device.h>
35 #include <media/v4l2-event.h>
36 #include <media/v4l2-fwnode.h>
37 #include <media/v4l2-ioctl.h>
38 #include <media/v4l2-mc.h>
39 #include <media/videobuf2-dma-contig.h>
44 #include "cfe-fmts.h"
45 #include "cfe.h"
47 #include "pisp-fe.h"
50 #include "cfe-trace.h"
52 #define CFE_MODULE_NAME "rp1-cfe"
55 #define cfe_dbg(cfe, fmt, arg...) dev_dbg(&(cfe)->pdev->dev, fmt, ##arg) argument
56 #define cfe_info(cfe, fmt, arg...) dev_info(&(cfe)->pdev->dev, fmt, ##arg) argument
57 #define cfe_err(cfe, fmt, arg...) dev_err(&(cfe)->pdev->dev, fmt, ##arg) argument
129 .name = "csi2-ch0",
141 .name = "csi2-ch1",
147 .name = "csi2-ch2",
153 .name = "csi2-ch3",
159 .name = "fe-image0",
165 .name = "fe-image1",
171 .name = "fe-stats",
177 .name = "fe-config",
184 #define is_fe_node(node) (((node)->id) >= FE_OUT0)
188 (node_desc[(node)->id].caps & V4L2_CAP_VIDEO_CAPTURE)
190 (node_desc[(node)->id].caps & V4L2_CAP_META_CAPTURE)
192 (node_desc[(node)->id].caps & V4L2_CAP_VIDEO_OUTPUT)
194 (node_desc[(node)->id].caps & V4L2_CAP_META_OUTPUT)
201 ((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
203 ((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
207 ((node)->buffer_queue.type == V4L2_BUF_TYPE_META_CAPTURE)
209 ((node)->buffer_queue.type == V4L2_BUF_TYPE_META_OUTPUT)
253 /* Buffer queue used in video-buf */
262 struct cfe_device *cfe; member
265 /* Frame-start counter */
312 static inline bool is_fe_enabled(struct cfe_device *cfe) in is_fe_enabled() argument
314 return cfe->fe_csi2_channel != -1; in is_fe_enabled()
322 static inline u32 cfg_reg_read(struct cfe_device *cfe, u32 offset) in cfg_reg_read() argument
324 return readl(cfe->mipi_cfg_base + offset); in cfg_reg_read()
327 static inline void cfg_reg_write(struct cfe_device *cfe, u32 offset, u32 val) in cfg_reg_write() argument
329 writel(val, cfe->mipi_cfg_base + offset); in cfg_reg_write()
332 static bool check_state(struct cfe_device *cfe, unsigned long state, in check_state() argument
338 if (!test_bit(bit + (node_id * NUM_STATES), cfe->node_flags)) in check_state()
345 static void set_state(struct cfe_device *cfe, unsigned long state, in set_state() argument
351 set_bit(bit + (node_id * NUM_STATES), cfe->node_flags); in set_state()
354 static void clear_state(struct cfe_device *cfe, unsigned long state, in clear_state() argument
360 clear_bit(bit + (node_id * NUM_STATES), cfe->node_flags); in clear_state()
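The check_state()/set_state()/clear_state() helpers above keep all per-node state in one bitmap, reserving NUM_STATES consecutive bits per node and indexing them as bit + node_id * NUM_STATES; the flag arguments are masks that can be combined (e.g. FS_INT | FE_INT later in the listing). A minimal sketch of that packing scheme, with demo_* names and flag counts that are assumptions rather than the driver's actual definitions:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_NUM_NODES		8
#define DEMO_NUM_STATES		5		/* assumption: REGISTERED/ENABLED/STREAMING/FS_INT/FE_INT-style flags */
#define DEMO_NODE_ENABLED	BIT(1)		/* flags are bit masks, so they can be OR'ed together */

struct demo_dev {
	/* One flat bitmap holding DEMO_NUM_STATES bits for each node. */
	DECLARE_BITMAP(node_flags, DEMO_NUM_NODES * DEMO_NUM_STATES);
};

/* True only if every flag present in "state" is set for this node. */
static bool demo_check_state(struct demo_dev *d, unsigned long state, int node_id)
{
	unsigned long bit;

	for_each_set_bit(bit, &state, BITS_PER_LONG)
		if (!test_bit(bit + node_id * DEMO_NUM_STATES, d->node_flags))
			return false;
	return true;
}

static void demo_set_state(struct demo_dev *d, unsigned long state, int node_id)
{
	unsigned long bit;

	for_each_set_bit(bit, &state, BITS_PER_LONG)
		set_bit(bit + node_id * DEMO_NUM_STATES, d->node_flags);
	/* clear_state() is the same loop with clear_bit(). */
}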
363 static bool test_any_node(struct cfe_device *cfe, unsigned long cond) in test_any_node() argument
366 if (check_state(cfe, cond, i)) in test_any_node()
373 static bool test_all_nodes(struct cfe_device *cfe, unsigned long precond, in test_all_nodes() argument
377 if (check_state(cfe, precond, i)) { in test_all_nodes()
378 if (!check_state(cfe, cond, i)) in test_all_nodes()
388 struct cfe_device *cfe = s->private; in mipi_cfg_regs_show() local
391 ret = pm_runtime_resume_and_get(&cfe->pdev->dev); in mipi_cfg_regs_show()
395 #define DUMP(reg) seq_printf(s, #reg " \t0x%08x\n", cfg_reg_read(cfe, reg)) in mipi_cfg_regs_show()
403 pm_runtime_put(&cfe->pdev->dev); in mipi_cfg_regs_show()
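The mipi_cfg_regs_show() fragments above (and the debugfs_create_file("regs", ...) call later in cfe_probe()) follow the usual seq_file register-dump pattern: take a runtime-PM reference, seq_printf each register, then drop the reference. A hedged sketch of that pattern with a hypothetical device; the register names, offsets, and the use of DEFINE_SHOW_ATTRIBUTE() are assumptions for illustration, not the driver's exact code:

#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>

struct demo_regs_dev {
	struct device *dev;
	void __iomem *base;
	struct dentry *debugfs;
};

static int demo_regs_show(struct seq_file *s, void *data)
{
	struct demo_regs_dev *d = s->private;	/* the pointer handed to debugfs_create_file() */
	int ret;

	/* Make sure clocks/power are up before touching MMIO registers. */
	ret = pm_runtime_resume_and_get(d->dev);
	if (ret)
		return ret;

	seq_printf(s, "CTRL \t0x%08x\n", readl(d->base + 0x00));	/* placeholder offsets */
	seq_printf(s, "STATUS \t0x%08x\n", readl(d->base + 0x04));

	pm_runtime_put(d->dev);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_regs);

static void demo_regs_debugfs_init(struct demo_regs_dev *d)
{
	d->debugfs = debugfs_create_dir("demo-regs", NULL);
	debugfs_create_file("regs", 0440, d->debugfs, d, &demo_regs_fops);
}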
452 if (!cfe_fmt || !cfe_fmt->remap[CFE_REMAP_16BIT]) in cfe_find_16bit_code()
455 cfe_fmt = find_format_by_pix(cfe_fmt->remap[CFE_REMAP_16BIT]); in cfe_find_16bit_code()
459 return cfe_fmt->code; in cfe_find_16bit_code()
472 if (!cfe_fmt || !cfe_fmt->remap[CFE_REMAP_COMPRESSED]) in cfe_find_compressed_code()
475 cfe_fmt = find_format_by_pix(cfe_fmt->remap[CFE_REMAP_COMPRESSED]); in cfe_find_compressed_code()
479 return cfe_fmt->code; in cfe_find_compressed_code()
482 static void cfe_calc_vid_format_size_bpl(struct cfe_device *cfe, in cfe_calc_vid_format_size_bpl() argument
488 v4l_bound_align_image(&f->fmt.pix.width, MIN_WIDTH, MAX_WIDTH, 2, in cfe_calc_vid_format_size_bpl()
489 &f->fmt.pix.height, MIN_HEIGHT, MAX_HEIGHT, 0, 0); in cfe_calc_vid_format_size_bpl()
492 ALIGN((f->fmt.pix.width * fmt->depth) >> 3, BPL_ALIGNMENT); in cfe_calc_vid_format_size_bpl()
494 if (f->fmt.pix.bytesperline > min_bytesperline && in cfe_calc_vid_format_size_bpl()
495 f->fmt.pix.bytesperline <= MAX_BYTESPERLINE) in cfe_calc_vid_format_size_bpl()
496 f->fmt.pix.bytesperline = in cfe_calc_vid_format_size_bpl()
497 ALIGN(f->fmt.pix.bytesperline, BPL_ALIGNMENT); in cfe_calc_vid_format_size_bpl()
499 f->fmt.pix.bytesperline = min_bytesperline; in cfe_calc_vid_format_size_bpl()
501 f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; in cfe_calc_vid_format_size_bpl()
503 cfe_dbg(cfe, "%s: %p4cc size: %ux%u bpl:%u img_size:%u\n", __func__, in cfe_calc_vid_format_size_bpl()
504 &f->fmt.pix.pixelformat, f->fmt.pix.width, f->fmt.pix.height, in cfe_calc_vid_format_size_bpl()
505 f->fmt.pix.bytesperline, f->fmt.pix.sizeimage); in cfe_calc_vid_format_size_bpl()
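cfe_calc_vid_format_size_bpl() above derives the stride and image size from the bit depth: bytesperline is at least ALIGN(width * depth / 8, BPL_ALIGNMENT), and sizeimage is height * bytesperline; the meta-format helper just below does the same computation without the alignment and clamping step. A small self-contained sketch of that arithmetic, using a placeholder alignment value since BPL_ALIGNMENT's definition is not among the matched lines:

#include <linux/align.h>

#define DEMO_BPL_ALIGNMENT	16	/* assumption: illustrative alignment only */

static unsigned int demo_min_bytesperline(unsigned int width, unsigned int depth_bits)
{
	return ALIGN((width * depth_bits) >> 3, DEMO_BPL_ALIGNMENT);
}

/*
 * Worked example, 1920x1080 at 16 bits per pixel:
 *   bytesperline = ALIGN(1920 * 16 / 8, 16) = 3840
 *   sizeimage    = 1080 * 3840              = 4147200 bytes
 */
static unsigned int demo_sizeimage(unsigned int width, unsigned int height,
				   unsigned int depth_bits)
{
	return height * demo_min_bytesperline(width, depth_bits);
}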
508 static void cfe_calc_meta_format_size_bpl(struct cfe_device *cfe, in cfe_calc_meta_format_size_bpl() argument
512 v4l_bound_align_image(&f->fmt.meta.width, MIN_META_WIDTH, MAX_WIDTH, 2, in cfe_calc_meta_format_size_bpl()
513 &f->fmt.meta.height, MIN_META_HEIGHT, MAX_HEIGHT, in cfe_calc_meta_format_size_bpl()
516 f->fmt.meta.bytesperline = (f->fmt.meta.width * fmt->depth) >> 3; in cfe_calc_meta_format_size_bpl()
517 f->fmt.meta.buffersize = f->fmt.meta.height * f->fmt.meta.bytesperline; in cfe_calc_meta_format_size_bpl()
519 cfe_dbg(cfe, "%s: %p4cc size: %ux%u bpl:%u buf_size:%u\n", __func__, in cfe_calc_meta_format_size_bpl()
520 &f->fmt.meta.dataformat, f->fmt.meta.width, f->fmt.meta.height, in cfe_calc_meta_format_size_bpl()
521 f->fmt.meta.bytesperline, f->fmt.meta.buffersize); in cfe_calc_meta_format_size_bpl()
524 static void cfe_schedule_next_csi2_job(struct cfe_device *cfe) in cfe_schedule_next_csi2_job() argument
530 struct cfe_node *node = &cfe->node[i]; in cfe_schedule_next_csi2_job()
533 if (!check_state(cfe, NODE_STREAMING, i)) in cfe_schedule_next_csi2_job()
536 buf = list_first_entry(&node->dma_queue, struct cfe_buffer, in cfe_schedule_next_csi2_job()
538 node->next_frm = buf; in cfe_schedule_next_csi2_job()
539 list_del(&buf->list); in cfe_schedule_next_csi2_job()
541 trace_cfe_csi2_schedule(node->id, &buf->vb.vb2_buf); in cfe_schedule_next_csi2_job()
544 size = node->meta_fmt.fmt.meta.buffersize; in cfe_schedule_next_csi2_job()
548 size = node->vid_fmt.fmt.pix.sizeimage; in cfe_schedule_next_csi2_job()
549 stride = node->vid_fmt.fmt.pix.bytesperline; in cfe_schedule_next_csi2_job()
552 addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0); in cfe_schedule_next_csi2_job()
553 csi2_set_buffer(&cfe->csi2, node->id, addr, stride, size); in cfe_schedule_next_csi2_job()
557 static void cfe_schedule_next_pisp_job(struct cfe_device *cfe) in cfe_schedule_next_pisp_job() argument
564 struct cfe_node *node = &cfe->node[i]; in cfe_schedule_next_pisp_job()
566 if (!check_state(cfe, NODE_STREAMING, i)) in cfe_schedule_next_pisp_job()
569 buf = list_first_entry(&node->dma_queue, struct cfe_buffer, in cfe_schedule_next_pisp_job()
572 trace_cfe_fe_schedule(node->id, &buf->vb.vb2_buf); in cfe_schedule_next_pisp_job()
574 node->next_frm = buf; in cfe_schedule_next_pisp_job()
575 vb2_bufs[node_desc[i].link_pad] = &buf->vb.vb2_buf; in cfe_schedule_next_pisp_job()
576 list_del(&buf->list); in cfe_schedule_next_pisp_job()
579 config_buf = to_cfe_config_buffer(cfe->node[FE_CONFIG].next_frm); in cfe_schedule_next_pisp_job()
580 pisp_fe_submit_job(&cfe->fe, vb2_bufs, &config_buf->config); in cfe_schedule_next_pisp_job()
583 static bool cfe_check_job_ready(struct cfe_device *cfe) in cfe_check_job_ready() argument
586 struct cfe_node *node = &cfe->node[i]; in cfe_check_job_ready()
588 if (!check_state(cfe, NODE_ENABLED, i)) in cfe_check_job_ready()
591 if (list_empty(&node->dma_queue)) in cfe_check_job_ready()
598 static void cfe_prepare_next_job(struct cfe_device *cfe) in cfe_prepare_next_job() argument
600 trace_cfe_prepare_next_job(is_fe_enabled(cfe)); in cfe_prepare_next_job()
602 cfe->job_queued = true; in cfe_prepare_next_job()
603 cfe_schedule_next_csi2_job(cfe); in cfe_prepare_next_job()
604 if (is_fe_enabled(cfe)) in cfe_prepare_next_job()
605 cfe_schedule_next_pisp_job(cfe); in cfe_prepare_next_job()
608 cfe->job_ready = cfe_check_job_ready(cfe); in cfe_prepare_next_job()
614 trace_cfe_buffer_complete(node->id, &node->cur_frm->vb); in cfe_process_buffer_complete()
616 node->cur_frm->vb.sequence = node->fs_count - 1; in cfe_process_buffer_complete()
617 vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state); in cfe_process_buffer_complete()
624 .u.frame_sync.frame_sequence = node->fs_count - 1, in cfe_queue_event_sof()
627 v4l2_event_queue(&node->video_dev, &event); in cfe_queue_event_sof()
632 struct cfe_device *cfe = node->cfe; in cfe_sof_isr() local
635 trace_cfe_frame_start(node->id, node->fs_count); in cfe_sof_isr()
644 if (WARN(node->cur_frm, "%s: [%s] Orphaned frame at seq %u\n", in cfe_sof_isr()
645 __func__, node_desc[node->id].name, node->fs_count)) in cfe_sof_isr()
648 node->cur_frm = node->next_frm; in cfe_sof_isr()
649 node->next_frm = NULL; in cfe_sof_isr()
650 node->fs_count++; in cfe_sof_isr()
652 node->ts = ktime_get_ns(); in cfe_sof_isr()
654 if (!check_state(cfe, NODE_STREAMING, i) || i == node->id) in cfe_sof_isr()
660 if (cfe->node[i].fs_count >= node->fs_count) in cfe_sof_isr()
661 node->ts = cfe->node[i].ts; in cfe_sof_isr()
666 if (matching_fs && cfe->node[i].fs_count != node->fs_count) in cfe_sof_isr()
671 cfe->job_queued = false; in cfe_sof_isr()
673 if (node->cur_frm) in cfe_sof_isr()
674 node->cur_frm->vb.vb2_buf.timestamp = node->ts; in cfe_sof_isr()
676 set_state(cfe, FS_INT, node->id); in cfe_sof_isr()
677 clear_state(cfe, FE_INT, node->id); in cfe_sof_isr()
685 struct cfe_device *cfe = node->cfe; in cfe_eof_isr() local
687 trace_cfe_frame_end(node->id, node->fs_count - 1); in cfe_eof_isr()
689 if (node->cur_frm) in cfe_eof_isr()
692 node->cur_frm = NULL; in cfe_eof_isr()
693 set_state(cfe, FE_INT, node->id); in cfe_eof_isr()
694 clear_state(cfe, FS_INT, node->id); in cfe_eof_isr()
699 struct cfe_device *cfe = dev; in cfe_isr() local
703 sts = cfg_reg_read(cfe, MIPICFG_INTS); in cfe_isr()
706 csi2_isr(&cfe->csi2, sof, eof); in cfe_isr()
709 pisp_fe_isr(&cfe->fe, sof + CSI2_NUM_CHANNELS, in cfe_isr()
712 spin_lock(&cfe->state_lock); in cfe_isr()
715 struct cfe_node *node = &cfe->node[i]; in cfe_isr()
722 if (!check_state(cfe, NODE_STREAMING, i) || !(sof[i] || eof[i])) in cfe_isr()
742 if (sof[i] && !check_state(cfe, FS_INT, i)) { in cfe_isr()
759 if (check_state(cfe, FS_INT, node->id) && in cfe_isr()
760 !check_state(cfe, FE_INT, node->id)) { in cfe_isr()
761 cfe_dbg(cfe, "%s: [%s] Handling missing previous FE interrupt\n", in cfe_isr()
762 __func__, node_desc[node->id].name); in cfe_isr()
769 if (!cfe->job_queued && cfe->job_ready) in cfe_isr()
770 cfe_prepare_next_job(cfe); in cfe_isr()
773 spin_unlock(&cfe->state_lock); in cfe_isr()
782 static int cfe_get_vc_dt_fallback(struct cfe_device *cfe, u8 *vc, u8 *dt) in cfe_get_vc_dt_fallback() argument
788 state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd); in cfe_get_vc_dt_fallback()
792 return -EINVAL; in cfe_get_vc_dt_fallback()
794 cfe_fmt = find_format_by_code(fmt->code); in cfe_get_vc_dt_fallback()
796 return -EINVAL; in cfe_get_vc_dt_fallback()
799 *dt = cfe_fmt->csi_dt; in cfe_get_vc_dt_fallback()
804 static int cfe_get_vc_dt(struct cfe_device *cfe, unsigned int channel, u8 *vc, in cfe_get_vc_dt() argument
813 state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd); in cfe_get_vc_dt()
815 ret = v4l2_subdev_routing_find_opposite_end(&state->routing, in cfe_get_vc_dt()
820 ret = v4l2_subdev_call(cfe->source_sd, pad, get_frame_desc, in cfe_get_vc_dt()
821 cfe->source_pad, &remote_desc); in cfe_get_vc_dt()
822 if (ret == -ENOIOCTLCMD) { in cfe_get_vc_dt()
823 cfe_dbg(cfe, "source does not support get_frame_desc, use fallback\n"); in cfe_get_vc_dt()
824 return cfe_get_vc_dt_fallback(cfe, vc, dt); in cfe_get_vc_dt()
826 cfe_err(cfe, "Failed to get frame descriptor\n"); in cfe_get_vc_dt()
831 cfe_err(cfe, "Frame descriptor does not describe CSI-2 link"); in cfe_get_vc_dt()
832 return -EINVAL; in cfe_get_vc_dt()
841 cfe_err(cfe, "Stream %u not found in remote frame desc\n", in cfe_get_vc_dt()
843 return -EINVAL; in cfe_get_vc_dt()
854 struct cfe_device *cfe = node->cfe; in cfe_start_channel() local
862 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); in cfe_start_channel()
864 start_fe = is_fe_enabled(cfe) && in cfe_start_channel()
865 test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING); in cfe_start_channel()
867 state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd); in cfe_start_channel()
873 cfe_dbg(cfe, "%s: %s using csi2 channel %d\n", __func__, in cfe_start_channel()
874 node_desc[FE_OUT0].name, cfe->fe_csi2_channel); in cfe_start_channel()
876 ret = cfe_get_vc_dt(cfe, cfe->fe_csi2_channel, &vc, &dt); in cfe_start_channel()
881 node_desc[cfe->fe_csi2_channel].link_pad); in cfe_start_channel()
882 fmt = find_format_by_code(source_fmt->code); in cfe_start_channel()
884 width = source_fmt->width; in cfe_start_channel()
885 height = source_fmt->height; in cfe_start_channel()
888 WARN_ON(!fmt->csi_dt); in cfe_start_channel()
897 csi2_start_channel(&cfe->csi2, cfe->fe_csi2_channel, in cfe_start_channel()
900 csi2_set_buffer(&cfe->csi2, cfe->fe_csi2_channel, 0, 0, -1); in cfe_start_channel()
901 pisp_fe_start(&cfe->fe); in cfe_start_channel()
908 ret = cfe_get_vc_dt(cfe, node->id, &vc, &dt); in cfe_start_channel()
911 csi2_stop_channel(&cfe->csi2, in cfe_start_channel()
912 cfe->fe_csi2_channel); in cfe_start_channel()
913 pisp_fe_stop(&cfe->fe); in cfe_start_channel()
922 node_desc[node->id].link_pad); in cfe_start_channel()
923 fmt = find_format_by_code(source_fmt->code); in cfe_start_channel()
926 WARN_ON(!fmt->csi_dt); in cfe_start_channel()
931 width = source_fmt->width; in cfe_start_channel()
932 height = source_fmt->height; in cfe_start_channel()
934 pixfmt = node->vid_fmt.fmt.pix.pixelformat; in cfe_start_channel()
936 if (pixfmt == fmt->remap[CFE_REMAP_16BIT]) { in cfe_start_channel()
938 } else if (pixfmt == fmt->remap[CFE_REMAP_COMPRESSED]) { in cfe_start_channel()
940 csi2_set_compression(&cfe->csi2, node->id, in cfe_start_channel()
946 csi2_start_channel(&cfe->csi2, node->id, in cfe_start_channel()
955 spin_lock_irqsave(&cfe->state_lock, flags); in cfe_start_channel()
956 if (cfe->job_ready && test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING)) in cfe_start_channel()
957 cfe_prepare_next_job(cfe); in cfe_start_channel()
958 spin_unlock_irqrestore(&cfe->state_lock, flags); in cfe_start_channel()
965 struct cfe_device *cfe = node->cfe; in cfe_stop_channel() local
967 cfe_dbg(cfe, "%s: [%s] fe_stop %u\n", __func__, in cfe_stop_channel()
968 node_desc[node->id].name, fe_stop); in cfe_stop_channel()
971 csi2_stop_channel(&cfe->csi2, cfe->fe_csi2_channel); in cfe_stop_channel()
972 pisp_fe_stop(&cfe->fe); in cfe_stop_channel()
976 csi2_stop_channel(&cfe->csi2, node->id); in cfe_stop_channel()
982 struct cfe_device *cfe = node->cfe; in cfe_return_buffers() local
986 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); in cfe_return_buffers()
988 spin_lock_irqsave(&cfe->state_lock, flags); in cfe_return_buffers()
989 list_for_each_entry_safe(buf, tmp, &node->dma_queue, list) { in cfe_return_buffers()
990 list_del(&buf->list); in cfe_return_buffers()
991 trace_cfe_return_buffer(node->id, buf->vb.vb2_buf.index, 2); in cfe_return_buffers()
992 vb2_buffer_done(&buf->vb.vb2_buf, state); in cfe_return_buffers()
995 if (node->cur_frm) { in cfe_return_buffers()
996 trace_cfe_return_buffer(node->id, in cfe_return_buffers()
997 node->cur_frm->vb.vb2_buf.index, 0); in cfe_return_buffers()
998 vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state); in cfe_return_buffers()
1000 if (node->next_frm && node->cur_frm != node->next_frm) { in cfe_return_buffers()
1001 trace_cfe_return_buffer(node->id, in cfe_return_buffers()
1002 node->next_frm->vb.vb2_buf.index, 1); in cfe_return_buffers()
1003 vb2_buffer_done(&node->next_frm->vb.vb2_buf, state); in cfe_return_buffers()
1006 node->cur_frm = NULL; in cfe_return_buffers()
1007 node->next_frm = NULL; in cfe_return_buffers()
1008 spin_unlock_irqrestore(&cfe->state_lock, flags); in cfe_return_buffers()
1020 struct cfe_device *cfe = node->cfe; in cfe_queue_setup() local
1022 node->vid_fmt.fmt.pix.sizeimage : in cfe_queue_setup()
1023 node->meta_fmt.fmt.meta.buffersize; in cfe_queue_setup()
1025 cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name, in cfe_queue_setup()
1026 node->buffer_queue.type); in cfe_queue_setup()
1028 if (vq->max_num_buffers + *nbuffers < 3) in cfe_queue_setup()
1029 *nbuffers = 3 - vq->max_num_buffers; in cfe_queue_setup()
1033 cfe_err(cfe, "sizes[0] %i < size %u\n", sizes[0], size); in cfe_queue_setup()
1034 return -EINVAL; in cfe_queue_setup()
1047 struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue); in cfe_buffer_prepare()
1048 struct cfe_device *cfe = node->cfe; in cfe_buffer_prepare() local
1052 trace_cfe_buffer_prepare(node->id, vb); in cfe_buffer_prepare()
1054 size = is_image_node(node) ? node->vid_fmt.fmt.pix.sizeimage : in cfe_buffer_prepare()
1055 node->meta_fmt.fmt.meta.buffersize; in cfe_buffer_prepare()
1057 cfe_err(cfe, "data will not fit into plane (%lu < %lu)\n", in cfe_buffer_prepare()
1059 return -EINVAL; in cfe_buffer_prepare()
1062 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size); in cfe_buffer_prepare()
1064 if (node->id == FE_CONFIG) { in cfe_buffer_prepare()
1068 memcpy(&b->config, addr, sizeof(struct pisp_fe_config)); in cfe_buffer_prepare()
1069 return pisp_fe_validate_config(&cfe->fe, &b->config, in cfe_buffer_prepare()
1070 &cfe->node[FE_OUT0].vid_fmt, in cfe_buffer_prepare()
1071 &cfe->node[FE_OUT1].vid_fmt); in cfe_buffer_prepare()
1079 struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue); in cfe_buffer_queue()
1080 struct cfe_device *cfe = node->cfe; in cfe_buffer_queue() local
1085 spin_lock_irqsave(&cfe->state_lock, flags); in cfe_buffer_queue()
1087 list_add_tail(&buf->list, &node->dma_queue); in cfe_buffer_queue()
1089 if (!cfe->job_ready) in cfe_buffer_queue()
1090 cfe->job_ready = cfe_check_job_ready(cfe); in cfe_buffer_queue()
1092 schedule_now = !cfe->job_queued && cfe->job_ready && in cfe_buffer_queue()
1093 test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING); in cfe_buffer_queue()
1095 trace_cfe_buffer_queue(node->id, vb, schedule_now); in cfe_buffer_queue()
1098 cfe_prepare_next_job(cfe); in cfe_buffer_queue()
1100 spin_unlock_irqrestore(&cfe->state_lock, flags); in cfe_buffer_queue()
1103 static s64 cfe_get_source_link_freq(struct cfe_device *cfe) in cfe_get_source_link_freq() argument
1109 state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd); in cfe_get_source_link_freq()
1121 if (state->routing.num_routes == 1) { in cfe_get_source_link_freq()
1122 struct v4l2_subdev_route *route = &state->routing.routes[0]; in cfe_get_source_link_freq()
1127 route->sink_pad, in cfe_get_source_link_freq()
1128 route->sink_stream); in cfe_get_source_link_freq()
1130 fmt = find_format_by_code(source_fmt->code); in cfe_get_source_link_freq()
1132 return -EINVAL; in cfe_get_source_link_freq()
1134 bpp = fmt->depth; in cfe_get_source_link_freq()
1139 link_freq = v4l2_get_link_freq(cfe->source_sd->ctrl_handler, bpp, in cfe_get_source_link_freq()
1140 2 * cfe->csi2.dphy.active_lanes); in cfe_get_source_link_freq()
1142 cfe_err(cfe, "failed to get link freq for subdev '%s'\n", in cfe_get_source_link_freq()
1143 cfe->source_sd->name); in cfe_get_source_link_freq()
1152 struct cfe_device *cfe = node->cfe; in cfe_start_streaming() local
1158 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); in cfe_start_streaming()
1160 if (!check_state(cfe, NODE_ENABLED, node->id)) { in cfe_start_streaming()
1161 cfe_err(cfe, "%s node link is not enabled.\n", in cfe_start_streaming()
1162 node_desc[node->id].name); in cfe_start_streaming()
1163 ret = -EINVAL; in cfe_start_streaming()
1167 ret = pm_runtime_resume_and_get(&cfe->pdev->dev); in cfe_start_streaming()
1169 cfe_err(cfe, "pm_runtime_resume_and_get failed\n"); in cfe_start_streaming()
1174 if (is_fe_enabled(cfe) && in cfe_start_streaming()
1175 !check_state(cfe, NODE_ENABLED, cfe->node[FE_CONFIG].id)) { in cfe_start_streaming()
1176 cfe_err(cfe, "FE enabled, but FE_CONFIG node is not\n"); in cfe_start_streaming()
1177 ret = -EINVAL; in cfe_start_streaming()
1181 ret = media_pipeline_start(&node->pad, &cfe->pipe); in cfe_start_streaming()
1183 cfe_err(cfe, "Failed to start media pipeline: %d\n", ret); in cfe_start_streaming()
1187 state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd); in cfe_start_streaming()
1189 clear_state(cfe, FS_INT | FE_INT, node->id); in cfe_start_streaming()
1190 set_state(cfe, NODE_STREAMING, node->id); in cfe_start_streaming()
1191 node->fs_count = 0; in cfe_start_streaming()
1197 if (!test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING)) { in cfe_start_streaming()
1198 cfe_dbg(cfe, "Streaming on hold, as all nodes are not set to streaming yet\n"); in cfe_start_streaming()
1203 cfg_reg_write(cfe, MIPICFG_CFG, MIPICFG_CFG_SEL_CSI); in cfe_start_streaming()
1204 cfg_reg_write(cfe, MIPICFG_INTE, in cfe_start_streaming()
1207 ret = v4l2_subdev_call(cfe->source_sd, pad, get_mbus_config, 0, in cfe_start_streaming()
1209 if (ret < 0 && ret != -ENOIOCTLCMD) { in cfe_start_streaming()
1210 cfe_err(cfe, "g_mbus_config failed\n"); in cfe_start_streaming()
1214 cfe->csi2.dphy.active_lanes = mbus_config.bus.mipi_csi2.num_data_lanes; in cfe_start_streaming()
1215 if (!cfe->csi2.dphy.active_lanes) in cfe_start_streaming()
1216 cfe->csi2.dphy.active_lanes = cfe->csi2.dphy.max_lanes; in cfe_start_streaming()
1217 if (cfe->csi2.dphy.active_lanes > cfe->csi2.dphy.max_lanes) { in cfe_start_streaming()
1218 cfe_err(cfe, "Device has requested %u data lanes, which is >%u configured in DT\n", in cfe_start_streaming()
1219 cfe->csi2.dphy.active_lanes, cfe->csi2.dphy.max_lanes); in cfe_start_streaming()
1220 ret = -EINVAL; in cfe_start_streaming()
1224 link_freq = cfe_get_source_link_freq(cfe); in cfe_start_streaming()
1228 cfe->csi2.dphy.dphy_rate = div_s64(link_freq * 2, 1000000); in cfe_start_streaming()
1229 csi2_open_rx(&cfe->csi2); in cfe_start_streaming()
1231 cfe->streams_mask = 0; in cfe_start_streaming()
1233 for_each_active_route(&state->routing, route) in cfe_start_streaming()
1234 cfe->streams_mask |= BIT_ULL(route->sink_stream); in cfe_start_streaming()
1236 ret = v4l2_subdev_enable_streams(cfe->source_sd, cfe->source_pad, in cfe_start_streaming()
1237 cfe->streams_mask); in cfe_start_streaming()
1239 cfe_err(cfe, "stream on failed in subdev\n"); in cfe_start_streaming()
1243 cfe_dbg(cfe, "Streaming enabled\n"); in cfe_start_streaming()
1250 csi2_close_rx(&cfe->csi2); in cfe_start_streaming()
1252 cfg_reg_write(cfe, MIPICFG_INTE, 0); in cfe_start_streaming()
1255 is_fe_enabled(cfe) && test_all_nodes(cfe, NODE_ENABLED, in cfe_start_streaming()
1259 media_pipeline_stop(&node->pad); in cfe_start_streaming()
1261 pm_runtime_put(&cfe->pdev->dev); in cfe_start_streaming()
1264 clear_state(cfe, NODE_STREAMING, node->id); in cfe_start_streaming()
1272 struct cfe_device *cfe = node->cfe; in cfe_stop_streaming() local
1276 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); in cfe_stop_streaming()
1278 spin_lock_irqsave(&cfe->state_lock, flags); in cfe_stop_streaming()
1279 fe_stop = is_fe_enabled(cfe) && in cfe_stop_streaming()
1280 test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING); in cfe_stop_streaming()
1282 cfe->job_ready = false; in cfe_stop_streaming()
1283 clear_state(cfe, NODE_STREAMING, node->id); in cfe_stop_streaming()
1284 spin_unlock_irqrestore(&cfe->state_lock, flags); in cfe_stop_streaming()
1288 if (!test_any_node(cfe, NODE_STREAMING)) { in cfe_stop_streaming()
1292 state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd); in cfe_stop_streaming()
1294 ret = v4l2_subdev_disable_streams(cfe->source_sd, in cfe_stop_streaming()
1295 cfe->source_pad, in cfe_stop_streaming()
1296 cfe->streams_mask); in cfe_stop_streaming()
1298 cfe_err(cfe, "stream disable failed in subdev\n"); in cfe_stop_streaming()
1302 csi2_close_rx(&cfe->csi2); in cfe_stop_streaming()
1304 cfg_reg_write(cfe, MIPICFG_INTE, 0); in cfe_stop_streaming()
1306 cfe_dbg(cfe, "%s: Streaming disabled\n", __func__); in cfe_stop_streaming()
1309 media_pipeline_stop(&node->pad); in cfe_stop_streaming()
1314 pm_runtime_put(&cfe->pdev->dev); in cfe_stop_streaming()
1334 strscpy(cap->driver, CFE_MODULE_NAME, sizeof(cap->driver)); in cfe_querycap()
1335 strscpy(cap->card, CFE_MODULE_NAME, sizeof(cap->card)); in cfe_querycap()
1337 cap->capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE | in cfe_querycap()
1347 struct cfe_device *cfe = node->cfe; in cfe_enum_fmt_vid_cap() local
1351 return -EINVAL; in cfe_enum_fmt_vid_cap()
1353 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); in cfe_enum_fmt_vid_cap()
1356 if (f->mbus_code && formats[i].code != f->mbus_code) in cfe_enum_fmt_vid_cap()
1367 if (j == f->index) { in cfe_enum_fmt_vid_cap()
1368 f->pixelformat = formats[i].fourcc; in cfe_enum_fmt_vid_cap()
1369 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; in cfe_enum_fmt_vid_cap()
1375 return -EINVAL; in cfe_enum_fmt_vid_cap()
1383 return -EINVAL; in cfe_g_fmt()
1385 *f = node->vid_fmt; in cfe_g_fmt()
1393 struct cfe_device *cfe = node->cfe; in cfe_validate_fmt_vid_cap() local
1396 cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 pix %p4cc\n", __func__, in cfe_validate_fmt_vid_cap()
1397 node_desc[node->id].name, f->fmt.pix.width, f->fmt.pix.height, in cfe_validate_fmt_vid_cap()
1398 &f->fmt.pix.pixelformat); in cfe_validate_fmt_vid_cap()
1401 return -EINVAL; in cfe_validate_fmt_vid_cap()
1406 fmt = find_format_by_pix(f->fmt.pix.pixelformat); in cfe_validate_fmt_vid_cap()
1410 f->fmt.pix.pixelformat = fmt->fourcc; in cfe_validate_fmt_vid_cap()
1412 if (is_fe_node(node) && fmt->remap[CFE_REMAP_16BIT]) { in cfe_validate_fmt_vid_cap()
1413 f->fmt.pix.pixelformat = fmt->remap[CFE_REMAP_16BIT]; in cfe_validate_fmt_vid_cap()
1414 fmt = find_format_by_pix(f->fmt.pix.pixelformat); in cfe_validate_fmt_vid_cap()
1417 f->fmt.pix.field = V4L2_FIELD_NONE; in cfe_validate_fmt_vid_cap()
1419 cfe_calc_vid_format_size_bpl(cfe, fmt, f); in cfe_validate_fmt_vid_cap()
1428 struct cfe_device *cfe = node->cfe; in cfe_s_fmt_vid_cap() local
1429 struct vb2_queue *q = &node->buffer_queue; in cfe_s_fmt_vid_cap()
1433 return -EBUSY; in cfe_s_fmt_vid_cap()
1439 node->vid_fmt = *f; in cfe_s_fmt_vid_cap()
1441 cfe_dbg(cfe, "%s: Set %ux%u, V4L2 pix %p4cc\n", __func__, in cfe_s_fmt_vid_cap()
1442 node->vid_fmt.fmt.pix.width, node->vid_fmt.fmt.pix.height, in cfe_s_fmt_vid_cap()
1443 &node->vid_fmt.fmt.pix.pixelformat); in cfe_s_fmt_vid_cap()
1452 struct cfe_device *cfe = node->cfe; in cfe_try_fmt_vid_cap() local
1454 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); in cfe_try_fmt_vid_cap()
1463 struct cfe_device *cfe = node->cfe; in cfe_enum_fmt_meta() local
1465 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); in cfe_enum_fmt_meta()
1468 return -EINVAL; in cfe_enum_fmt_meta()
1470 switch (node->id) { in cfe_enum_fmt_meta()
1472 f->flags = V4L2_FMT_FLAG_META_LINE_BASED; in cfe_enum_fmt_meta()
1474 switch (f->index) { in cfe_enum_fmt_meta()
1476 f->pixelformat = V4L2_META_FMT_GENERIC_8; in cfe_enum_fmt_meta()
1479 f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_10; in cfe_enum_fmt_meta()
1482 f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_12; in cfe_enum_fmt_meta()
1485 return -EINVAL; in cfe_enum_fmt_meta()
1491 if (f->index != 0) in cfe_enum_fmt_meta()
1492 return -EINVAL; in cfe_enum_fmt_meta()
1494 switch (node->id) { in cfe_enum_fmt_meta()
1496 f->pixelformat = V4L2_META_FMT_RPI_FE_STATS; in cfe_enum_fmt_meta()
1499 f->pixelformat = V4L2_META_FMT_RPI_FE_CFG; in cfe_enum_fmt_meta()
1502 return -EINVAL; in cfe_enum_fmt_meta()
1508 struct cfe_device *cfe = node->cfe; in cfe_validate_fmt_meta() local
1511 switch (node->id) { in cfe_validate_fmt_meta()
1513 cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 meta %p4cc\n", __func__, in cfe_validate_fmt_meta()
1514 node_desc[node->id].name, f->fmt.meta.width, in cfe_validate_fmt_meta()
1515 f->fmt.meta.height, &f->fmt.meta.dataformat); in cfe_validate_fmt_meta()
1519 cfe_dbg(cfe, "%s: [%s] %u bytes, V4L2 meta %p4cc\n", __func__, in cfe_validate_fmt_meta()
1520 node_desc[node->id].name, f->fmt.meta.buffersize, in cfe_validate_fmt_meta()
1521 &f->fmt.meta.dataformat); in cfe_validate_fmt_meta()
1524 return -EINVAL; in cfe_validate_fmt_meta()
1528 return -EINVAL; in cfe_validate_fmt_meta()
1530 switch (node->id) { in cfe_validate_fmt_meta()
1532 fmt = find_format_by_pix(f->fmt.meta.dataformat); in cfe_validate_fmt_meta()
1533 if (!fmt || !(fmt->flags & CFE_FORMAT_FLAG_META_CAP)) in cfe_validate_fmt_meta()
1536 f->fmt.meta.dataformat = fmt->fourcc; in cfe_validate_fmt_meta()
1538 cfe_calc_meta_format_size_bpl(cfe, fmt, f); in cfe_validate_fmt_meta()
1542 f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_STATS; in cfe_validate_fmt_meta()
1543 f->fmt.meta.buffersize = sizeof(struct pisp_statistics); in cfe_validate_fmt_meta()
1546 f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_CFG; in cfe_validate_fmt_meta()
1547 f->fmt.meta.buffersize = sizeof(struct pisp_fe_config); in cfe_validate_fmt_meta()
1550 return -EINVAL; in cfe_validate_fmt_meta()
1557 struct cfe_device *cfe = node->cfe; in cfe_g_fmt_meta() local
1559 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); in cfe_g_fmt_meta()
1562 return -EINVAL; in cfe_g_fmt_meta()
1564 *f = node->meta_fmt; in cfe_g_fmt_meta()
1572 struct cfe_device *cfe = node->cfe; in cfe_s_fmt_meta() local
1573 struct vb2_queue *q = &node->buffer_queue; in cfe_s_fmt_meta()
1576 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); in cfe_s_fmt_meta()
1579 return -EBUSY; in cfe_s_fmt_meta()
1582 return -EINVAL; in cfe_s_fmt_meta()
1588 node->meta_fmt = *f; in cfe_s_fmt_meta()
1590 cfe_dbg(cfe, "%s: Set %p4cc\n", __func__, in cfe_s_fmt_meta()
1591 &node->meta_fmt.fmt.meta.dataformat); in cfe_s_fmt_meta()
1600 struct cfe_device *cfe = node->cfe; in cfe_try_fmt_meta() local
1602 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); in cfe_try_fmt_meta()
1610 struct cfe_device *cfe = node->cfe; in cfe_enum_framesizes() local
1613 cfe_dbg(cfe, "%s [%s]\n", __func__, node_desc[node->id].name); in cfe_enum_framesizes()
1615 if (fsize->index > 0) in cfe_enum_framesizes()
1616 return -EINVAL; in cfe_enum_framesizes()
1619 fmt = find_format_by_pix(fsize->pixel_format); in cfe_enum_framesizes()
1621 cfe_dbg(cfe, "Invalid pixel code: %x\n", fsize->pixel_format); in cfe_enum_framesizes()
1622 return -EINVAL; in cfe_enum_framesizes()
1627 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; in cfe_enum_framesizes()
1628 fsize->stepwise.min_width = MIN_WIDTH; in cfe_enum_framesizes()
1629 fsize->stepwise.max_width = MAX_WIDTH; in cfe_enum_framesizes()
1630 fsize->stepwise.step_width = 2; in cfe_enum_framesizes()
1631 fsize->stepwise.min_height = MIN_HEIGHT; in cfe_enum_framesizes()
1632 fsize->stepwise.max_height = MAX_HEIGHT; in cfe_enum_framesizes()
1633 fsize->stepwise.step_height = 1; in cfe_enum_framesizes()
1643 struct cfe_device *cfe = node->cfe; in cfe_vb2_ioctl_reqbufs() local
1646 cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name, in cfe_vb2_ioctl_reqbufs()
1647 p->type); in cfe_vb2_ioctl_reqbufs()
1649 if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && in cfe_vb2_ioctl_reqbufs()
1650 p->type != V4L2_BUF_TYPE_META_CAPTURE && in cfe_vb2_ioctl_reqbufs()
1651 p->type != V4L2_BUF_TYPE_META_OUTPUT) in cfe_vb2_ioctl_reqbufs()
1652 return -EINVAL; in cfe_vb2_ioctl_reqbufs()
1654 ret = vb2_queue_change_type(vdev->queue, p->type); in cfe_vb2_ioctl_reqbufs()
1666 struct cfe_device *cfe = node->cfe; in cfe_vb2_ioctl_create_bufs() local
1669 cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name, in cfe_vb2_ioctl_create_bufs()
1670 p->format.type); in cfe_vb2_ioctl_create_bufs()
1672 if (p->format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE && in cfe_vb2_ioctl_create_bufs()
1673 p->format.type != V4L2_BUF_TYPE_META_CAPTURE && in cfe_vb2_ioctl_create_bufs()
1674 p->format.type != V4L2_BUF_TYPE_META_OUTPUT) in cfe_vb2_ioctl_create_bufs()
1675 return -EINVAL; in cfe_vb2_ioctl_create_bufs()
1677 ret = vb2_queue_change_type(vdev->queue, p->format.type); in cfe_vb2_ioctl_create_bufs()
1687 struct cfe_node *node = video_get_drvdata(fh->vdev); in cfe_subscribe_event()
1689 switch (sub->type) { in cfe_subscribe_event()
1742 struct cfe_device *cfe = to_cfe_device(sd->v4l2_dev); in cfe_notify() local
1747 struct cfe_node *node = &cfe->node[i]; in cfe_notify()
1749 if (check_state(cfe, NODE_REGISTERED, i)) in cfe_notify()
1752 v4l2_event_queue(&node->video_dev, arg); in cfe_notify()
1760 /* cfe capture driver file operations */
1772 struct video_device *vd = container_of(link->sink->entity, in cfe_video_link_validate()
1775 struct cfe_device *cfe = node->cfe; in cfe_video_link_validate() local
1781 cfe_dbg(cfe, "%s: [%s] link \"%s\":%u -> \"%s\":%u\n", __func__, in cfe_video_link_validate()
1782 node_desc[node->id].name, in cfe_video_link_validate()
1783 link->source->entity->name, link->source->index, in cfe_video_link_validate()
1784 link->sink->entity->name, link->sink->index); in cfe_video_link_validate()
1786 if (!media_entity_remote_source_pad_unique(link->sink->entity)) { in cfe_video_link_validate()
1787 cfe_err(cfe, "video node %s pad not connected\n", vd->name); in cfe_video_link_validate()
1788 return -ENOTCONN; in cfe_video_link_validate()
1791 source_sd = media_entity_to_v4l2_subdev(link->source->entity); in cfe_video_link_validate()
1795 source_fmt = v4l2_subdev_state_get_format(state, link->source->index); in cfe_video_link_validate()
1797 ret = -EINVAL; in cfe_video_link_validate()
1802 struct v4l2_pix_format *pix_fmt = &node->vid_fmt.fmt.pix; in cfe_video_link_validate()
1805 if (source_fmt->width != pix_fmt->width || in cfe_video_link_validate()
1806 source_fmt->height != pix_fmt->height) { in cfe_video_link_validate()
1807 cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n", in cfe_video_link_validate()
1808 pix_fmt->width, pix_fmt->height, in cfe_video_link_validate()
1809 source_fmt->width, source_fmt->height); in cfe_video_link_validate()
1810 ret = -EINVAL; in cfe_video_link_validate()
1814 fmt = find_format_by_code_and_fourcc(source_fmt->code, in cfe_video_link_validate()
1815 pix_fmt->pixelformat); in cfe_video_link_validate()
1817 cfe_err(cfe, "Format mismatch!\n"); in cfe_video_link_validate()
1818 ret = -EINVAL; in cfe_video_link_validate()
1822 struct v4l2_meta_format *meta_fmt = &node->meta_fmt.fmt.meta; in cfe_video_link_validate()
1825 if (source_fmt->width != meta_fmt->width || in cfe_video_link_validate()
1826 source_fmt->height != meta_fmt->height) { in cfe_video_link_validate()
1827 cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n", in cfe_video_link_validate()
1828 meta_fmt->width, meta_fmt->height, in cfe_video_link_validate()
1829 source_fmt->width, source_fmt->height); in cfe_video_link_validate()
1830 ret = -EINVAL; in cfe_video_link_validate()
1834 fmt = find_format_by_code_and_fourcc(source_fmt->code, in cfe_video_link_validate()
1835 meta_fmt->dataformat); in cfe_video_link_validate()
1837 cfe_err(cfe, "Format mismatch!\n"); in cfe_video_link_validate()
1838 ret = -EINVAL; in cfe_video_link_validate()
1856 struct media_device *mdev = link->graph_obj.mdev; in cfe_video_link_notify()
1857 struct cfe_device *cfe = container_of(mdev, struct cfe_device, mdev); in cfe_video_link_notify() local
1858 struct media_entity *fe = &cfe->fe.sd.entity; in cfe_video_link_notify()
1859 struct media_entity *csi2 = &cfe->csi2.sd.entity; in cfe_video_link_notify()
1865 cfe_dbg(cfe, "%s: %s[%u] -> %s[%u] 0x%x", __func__, in cfe_video_link_notify()
1866 link->source->entity->name, link->source->index, in cfe_video_link_notify()
1867 link->sink->entity->name, link->sink->index, flags); in cfe_video_link_notify()
1869 spin_lock_irqsave(&cfe->state_lock, lock_flags); in cfe_video_link_notify()
1872 if (link->sink->entity != &cfe->node[i].video_dev.entity && in cfe_video_link_notify()
1873 link->source->entity != &cfe->node[i].video_dev.entity) in cfe_video_link_notify()
1876 if (link->flags & MEDIA_LNK_FL_ENABLED) in cfe_video_link_notify()
1877 set_state(cfe, NODE_ENABLED, i); in cfe_video_link_notify()
1879 clear_state(cfe, NODE_ENABLED, i); in cfe_video_link_notify()
1884 spin_unlock_irqrestore(&cfe->state_lock, lock_flags); in cfe_video_link_notify()
1886 if (link->source->entity != csi2) in cfe_video_link_notify()
1888 if (link->sink->entity != fe) in cfe_video_link_notify()
1890 if (link->sink->index != 0) in cfe_video_link_notify()
1893 cfe->fe_csi2_channel = -1; in cfe_video_link_notify()
1894 if (link->flags & MEDIA_LNK_FL_ENABLED) { in cfe_video_link_notify()
1895 if (link->source->index == node_desc[CSI2_CH0].link_pad) in cfe_video_link_notify()
1896 cfe->fe_csi2_channel = CSI2_CH0; in cfe_video_link_notify()
1897 else if (link->source->index == node_desc[CSI2_CH1].link_pad) in cfe_video_link_notify()
1898 cfe->fe_csi2_channel = CSI2_CH1; in cfe_video_link_notify()
1899 else if (link->source->index == node_desc[CSI2_CH2].link_pad) in cfe_video_link_notify()
1900 cfe->fe_csi2_channel = CSI2_CH2; in cfe_video_link_notify()
1901 else if (link->source->index == node_desc[CSI2_CH3].link_pad) in cfe_video_link_notify()
1902 cfe->fe_csi2_channel = CSI2_CH3; in cfe_video_link_notify()
1905 if (is_fe_enabled(cfe)) in cfe_video_link_notify()
1906 cfe_dbg(cfe, "%s: Found CSI2:%d -> FE:0 link\n", __func__, in cfe_video_link_notify()
1907 cfe->fe_csi2_channel); in cfe_video_link_notify()
1909 cfe_dbg(cfe, "%s: Unable to find CSI2:x -> FE:0 link\n", in cfe_video_link_notify()
1921 struct cfe_device *cfe = container_of(kref, struct cfe_device, kref); in cfe_release() local
1923 media_device_cleanup(&cfe->mdev); in cfe_release()
1925 kfree(cfe); in cfe_release()
1928 static void cfe_put(struct cfe_device *cfe) in cfe_put() argument
1930 kref_put(&cfe->kref, cfe_release); in cfe_put()
1933 static void cfe_get(struct cfe_device *cfe) in cfe_get() argument
1935 kref_get(&cfe->kref); in cfe_get()
1942 cfe_put(node->cfe); in cfe_node_release()
1945 static int cfe_register_node(struct cfe_device *cfe, int id) in cfe_register_node() argument
1950 struct cfe_node *node = &cfe->node[id]; in cfe_register_node()
1953 node->cfe = cfe; in cfe_register_node()
1954 node->id = id; in cfe_register_node()
1958 node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; in cfe_register_node()
1960 node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; in cfe_register_node()
1964 cfe_err(cfe, "Failed to find format code\n"); in cfe_register_node()
1965 return -EINVAL; in cfe_register_node()
1968 node->vid_fmt.fmt.pix.pixelformat = fmt->fourcc; in cfe_register_node()
1969 v4l2_fill_pix_format(&node->vid_fmt.fmt.pix, in cfe_register_node()
1972 ret = cfe_validate_fmt_vid_cap(node, &node->vid_fmt); in cfe_register_node()
1979 node->meta_fmt.type = V4L2_BUF_TYPE_META_CAPTURE; in cfe_register_node()
1981 node->meta_fmt.type = V4L2_BUF_TYPE_META_OUTPUT; in cfe_register_node()
1983 ret = cfe_validate_fmt_meta(node, &node->meta_fmt); in cfe_register_node()
1988 mutex_init(&node->lock); in cfe_register_node()
1990 q = &node->buffer_queue; in cfe_register_node()
1991 q->type = node_supports_image(node) ? node->vid_fmt.type : in cfe_register_node()
1992 node->meta_fmt.type; in cfe_register_node()
1993 q->io_modes = VB2_MMAP | VB2_DMABUF; in cfe_register_node()
1994 q->drv_priv = node; in cfe_register_node()
1995 q->ops = &cfe_video_qops; in cfe_register_node()
1996 q->mem_ops = &vb2_dma_contig_memops; in cfe_register_node()
1997 q->buf_struct_size = id == FE_CONFIG ? sizeof(struct cfe_config_buffer) in cfe_register_node()
1999 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; in cfe_register_node()
2000 q->lock = &node->lock; in cfe_register_node()
2001 q->min_queued_buffers = 1; in cfe_register_node()
2002 q->dev = &cfe->pdev->dev; in cfe_register_node()
2006 cfe_err(cfe, "vb2_queue_init() failed\n"); in cfe_register_node()
2010 INIT_LIST_HEAD(&node->dma_queue); in cfe_register_node()
2012 vdev = &node->video_dev; in cfe_register_node()
2013 vdev->release = cfe_node_release; in cfe_register_node()
2014 vdev->fops = &cfe_fops; in cfe_register_node()
2015 vdev->ioctl_ops = &cfe_ioctl_ops; in cfe_register_node()
2016 vdev->entity.ops = &cfe_media_entity_ops; in cfe_register_node()
2017 vdev->v4l2_dev = &cfe->v4l2_dev; in cfe_register_node()
2018 vdev->vfl_dir = (node_supports_image_output(node) || in cfe_register_node()
2022 vdev->queue = q; in cfe_register_node()
2023 vdev->lock = &node->lock; in cfe_register_node()
2024 vdev->device_caps = node_desc[id].caps; in cfe_register_node()
2025 vdev->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_IO_MC; in cfe_register_node()
2028 snprintf(vdev->name, sizeof(vdev->name), "%s-%s", CFE_MODULE_NAME, in cfe_register_node()
2032 node->pad.flags = node_desc[id].pad_flags; in cfe_register_node()
2033 media_entity_pads_init(&vdev->entity, 1, &node->pad); in cfe_register_node()
2036 v4l2_disable_ioctl(&node->video_dev, in cfe_register_node()
2038 v4l2_disable_ioctl(&node->video_dev, VIDIOC_ENUM_FRAMESIZES); in cfe_register_node()
2041 ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); in cfe_register_node()
2043 cfe_err(cfe, "Unable to register video device %s\n", in cfe_register_node()
2044 vdev->name); in cfe_register_node()
2048 cfe_info(cfe, "Registered [%s] node id %d as /dev/video%u\n", in cfe_register_node()
2049 vdev->name, id, vdev->num); in cfe_register_node()
2052 * Acquire a reference to cfe, which will be released when the video in cfe_register_node()
2056 cfe_get(cfe); in cfe_register_node()
2057 set_state(cfe, NODE_REGISTERED, id); in cfe_register_node()
2062 static void cfe_unregister_nodes(struct cfe_device *cfe) in cfe_unregister_nodes() argument
2065 struct cfe_node *node = &cfe->node[i]; in cfe_unregister_nodes()
2067 if (check_state(cfe, NODE_REGISTERED, i)) { in cfe_unregister_nodes()
2068 clear_state(cfe, NODE_REGISTERED, i); in cfe_unregister_nodes()
2069 video_unregister_device(&node->video_dev); in cfe_unregister_nodes()
2074 static int cfe_link_node_pads(struct cfe_device *cfe) in cfe_link_node_pads() argument
2079 /* Source -> CSI2 */ in cfe_link_node_pads()
2081 ret = v4l2_create_fwnode_links_to_pad(cfe->source_sd, in cfe_link_node_pads()
2082 &cfe->csi2.pad[CSI2_PAD_SINK], in cfe_link_node_pads()
2086 cfe_err(cfe, "Failed to create links to the source: %d\n", ret); in cfe_link_node_pads()
2090 remote_pad = media_pad_remote_pad_unique(&cfe->csi2.pad[CSI2_PAD_SINK]); in cfe_link_node_pads()
2093 cfe_err(cfe, "Failed to get unique remote source pad: %d\n", in cfe_link_node_pads()
2098 cfe->source_pad = remote_pad->index; in cfe_link_node_pads()
2101 struct cfe_node *node = &cfe->node[i]; in cfe_link_node_pads()
2103 if (!check_state(cfe, NODE_REGISTERED, i)) in cfe_link_node_pads()
2106 /* CSI2 channel # -> /dev/video# */ in cfe_link_node_pads()
2107 ret = media_create_pad_link(&cfe->csi2.sd.entity, in cfe_link_node_pads()
2109 &node->video_dev.entity, 0, 0); in cfe_link_node_pads()
2114 /* CSI2 channel # -> FE Input */ in cfe_link_node_pads()
2115 ret = media_create_pad_link(&cfe->csi2.sd.entity, in cfe_link_node_pads()
2117 &cfe->fe.sd.entity, in cfe_link_node_pads()
2125 struct cfe_node *node = &cfe->node[i]; in cfe_link_node_pads()
2130 /* FE -> /dev/video# */ in cfe_link_node_pads()
2131 src = &cfe->fe.sd.entity; in cfe_link_node_pads()
2133 dst = &node->video_dev.entity; in cfe_link_node_pads()
2136 /* /dev/video# -> FE */ in cfe_link_node_pads()
2137 dst = &cfe->fe.sd.entity; in cfe_link_node_pads()
2139 src = &node->video_dev.entity; in cfe_link_node_pads()
2151 static int cfe_probe_complete(struct cfe_device *cfe) in cfe_probe_complete() argument
2155 cfe->v4l2_dev.notify = cfe_notify; in cfe_probe_complete()
2158 ret = cfe_register_node(cfe, i); in cfe_probe_complete()
2160 cfe_err(cfe, "Unable to register video node %u.\n", i); in cfe_probe_complete()
2165 ret = cfe_link_node_pads(cfe); in cfe_probe_complete()
2167 cfe_err(cfe, "Unable to link node pads.\n"); in cfe_probe_complete()
2171 ret = v4l2_device_register_subdev_nodes(&cfe->v4l2_dev); in cfe_probe_complete()
2173 cfe_err(cfe, "Unable to register subdev nodes.\n"); in cfe_probe_complete()
2180 cfe_unregister_nodes(cfe); in cfe_probe_complete()
2188 struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev); in cfe_async_bound() local
2190 if (cfe->source_sd) { in cfe_async_bound()
2191 cfe_err(cfe, "Rejecting subdev %s (Already set!!)", in cfe_async_bound()
2192 subdev->name); in cfe_async_bound()
2196 cfe->source_sd = subdev; in cfe_async_bound()
2198 cfe_dbg(cfe, "Using source %s for capture\n", subdev->name); in cfe_async_bound()
2205 struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev); in cfe_async_complete() local
2207 return cfe_probe_complete(cfe); in cfe_async_complete()
2215 static int cfe_register_async_nf(struct cfe_device *cfe) in cfe_register_async_nf() argument
2217 struct platform_device *pdev = cfe->pdev; in cfe_register_async_nf()
2223 local_ep_fwnode = fwnode_graph_get_endpoint_by_id(pdev->dev.fwnode, 0, in cfe_register_async_nf()
2226 cfe_err(cfe, "Failed to find local endpoint fwnode\n"); in cfe_register_async_nf()
2227 return -ENODEV; in cfe_register_async_nf()
2233 cfe_err(cfe, "Failed to find remote endpoint fwnode\n"); in cfe_register_async_nf()
2240 cfe_err(cfe, "Data lanes reordering not supported\n"); in cfe_register_async_nf()
2241 ret = -EINVAL; in cfe_register_async_nf()
2246 cfe->csi2.dphy.max_lanes = ep.bus.mipi_csi2.num_data_lanes; in cfe_register_async_nf()
2247 cfe->csi2.bus_flags = ep.bus.mipi_csi2.flags; in cfe_register_async_nf()
2250 v4l2_async_nf_init(&cfe->notifier, &cfe->v4l2_dev); in cfe_register_async_nf()
2251 cfe->notifier.ops = &cfe_async_ops; in cfe_register_async_nf()
2253 asd = v4l2_async_nf_add_fwnode_remote(&cfe->notifier, local_ep_fwnode, in cfe_register_async_nf()
2257 cfe_err(cfe, "Error adding subdevice: %d\n", ret); in cfe_register_async_nf()
2261 ret = v4l2_async_nf_register(&cfe->notifier); in cfe_register_async_nf()
2263 cfe_err(cfe, "Error registering async notifier: %d\n", ret); in cfe_register_async_nf()
2272 v4l2_async_nf_cleanup(&cfe->notifier); in cfe_register_async_nf()
2281 struct cfe_device *cfe; in cfe_probe() local
2285 cfe = kzalloc(sizeof(*cfe), GFP_KERNEL); in cfe_probe()
2286 if (!cfe) in cfe_probe()
2287 return -ENOMEM; in cfe_probe()
2289 platform_set_drvdata(pdev, cfe); in cfe_probe()
2291 kref_init(&cfe->kref); in cfe_probe()
2292 cfe->pdev = pdev; in cfe_probe()
2293 cfe->fe_csi2_channel = -1; in cfe_probe()
2294 spin_lock_init(&cfe->state_lock); in cfe_probe()
2296 cfe->csi2.base = devm_platform_ioremap_resource(pdev, 0); in cfe_probe()
2297 if (IS_ERR(cfe->csi2.base)) { in cfe_probe()
2298 dev_err(&pdev->dev, "Failed to get dma io block\n"); in cfe_probe()
2299 ret = PTR_ERR(cfe->csi2.base); in cfe_probe()
2303 cfe->csi2.dphy.base = devm_platform_ioremap_resource(pdev, 1); in cfe_probe()
2304 if (IS_ERR(cfe->csi2.dphy.base)) { in cfe_probe()
2305 dev_err(&pdev->dev, "Failed to get host io block\n"); in cfe_probe()
2306 ret = PTR_ERR(cfe->csi2.dphy.base); in cfe_probe()
2310 cfe->mipi_cfg_base = devm_platform_ioremap_resource(pdev, 2); in cfe_probe()
2311 if (IS_ERR(cfe->mipi_cfg_base)) { in cfe_probe()
2312 dev_err(&pdev->dev, "Failed to get mipi cfg io block\n"); in cfe_probe()
2313 ret = PTR_ERR(cfe->mipi_cfg_base); in cfe_probe()
2317 cfe->fe.base = devm_platform_ioremap_resource(pdev, 3); in cfe_probe()
2318 if (IS_ERR(cfe->fe.base)) { in cfe_probe()
2319 dev_err(&pdev->dev, "Failed to get pisp fe io block\n"); in cfe_probe()
2320 ret = PTR_ERR(cfe->fe.base); in cfe_probe()
2326 ret = -EINVAL; in cfe_probe()
2330 ret = devm_request_irq(&pdev->dev, ret, cfe_isr, 0, "rp1-cfe", cfe); in cfe_probe()
2332 dev_err(&pdev->dev, "Unable to request interrupt\n"); in cfe_probe()
2333 ret = -EINVAL; in cfe_probe()
2337 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in cfe_probe()
2339 dev_err(&pdev->dev, "DMA enable failed\n"); in cfe_probe()
2343 ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, UINT_MAX); in cfe_probe()
2348 cfe->clk = devm_clk_get(&pdev->dev, NULL); in cfe_probe()
2349 if (IS_ERR(cfe->clk)) { in cfe_probe()
2350 ret = dev_err_probe(&pdev->dev, PTR_ERR(cfe->clk), in cfe_probe()
2355 cfe->mdev.dev = &pdev->dev; in cfe_probe()
2356 cfe->mdev.ops = &cfe_media_device_ops; in cfe_probe()
2357 strscpy(cfe->mdev.model, CFE_MODULE_NAME, sizeof(cfe->mdev.model)); in cfe_probe()
2358 strscpy(cfe->mdev.serial, "", sizeof(cfe->mdev.serial)); in cfe_probe()
2359 snprintf(cfe->mdev.bus_info, sizeof(cfe->mdev.bus_info), "platform:%s", in cfe_probe()
2360 dev_name(&pdev->dev)); in cfe_probe()
2362 media_device_init(&cfe->mdev); in cfe_probe()
2364 cfe->v4l2_dev.mdev = &cfe->mdev; in cfe_probe()
2366 ret = v4l2_device_register(&pdev->dev, &cfe->v4l2_dev); in cfe_probe()
2368 cfe_err(cfe, "Unable to register v4l2 device.\n"); in cfe_probe()
2372 snprintf(debugfs_name, sizeof(debugfs_name), "rp1-cfe:%s", in cfe_probe()
2373 dev_name(&pdev->dev)); in cfe_probe()
2374 cfe->debugfs = debugfs_create_dir(debugfs_name, NULL); in cfe_probe()
2375 debugfs_create_file("regs", 0440, cfe->debugfs, cfe, in cfe_probe()
2379 pm_runtime_enable(&pdev->dev); in cfe_probe()
2381 ret = pm_runtime_resume_and_get(&cfe->pdev->dev); in cfe_probe()
2385 cfe->csi2.v4l2_dev = &cfe->v4l2_dev; in cfe_probe()
2386 ret = csi2_init(&cfe->csi2, cfe->debugfs); in cfe_probe()
2388 cfe_err(cfe, "Failed to init csi2 (%d)\n", ret); in cfe_probe()
2392 cfe->fe.v4l2_dev = &cfe->v4l2_dev; in cfe_probe()
2393 ret = pisp_fe_init(&cfe->fe, cfe->debugfs); in cfe_probe()
2395 cfe_err(cfe, "Failed to init pisp fe (%d)\n", ret); in cfe_probe()
2399 cfe->mdev.hw_revision = cfe->fe.hw_revision; in cfe_probe()
2400 ret = media_device_register(&cfe->mdev); in cfe_probe()
2402 cfe_err(cfe, "Unable to register media-controller device.\n"); in cfe_probe()
2406 ret = cfe_register_async_nf(cfe); in cfe_probe()
2408 cfe_err(cfe, "Failed to connect subdevs\n"); in cfe_probe()
2412 pm_runtime_put(&cfe->pdev->dev); in cfe_probe()
2417 media_device_unregister(&cfe->mdev); in cfe_probe()
2419 pisp_fe_uninit(&cfe->fe); in cfe_probe()
2421 csi2_uninit(&cfe->csi2); in cfe_probe()
2423 pm_runtime_put(&cfe->pdev->dev); in cfe_probe()
2425 pm_runtime_disable(&pdev->dev); in cfe_probe()
2426 debugfs_remove(cfe->debugfs); in cfe_probe()
2427 v4l2_device_unregister(&cfe->v4l2_dev); in cfe_probe()
2429 cfe_put(cfe); in cfe_probe()
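The calls listed at the tail of cfe_probe() (media_device_unregister, pisp_fe_uninit, csi2_uninit, pm_runtime_put/disable, debugfs_remove, v4l2_device_unregister, cfe_put) form the error-unwind ladder, releasing resources in reverse order of acquisition. A generic, self-contained sketch of that goto-based pattern; the demo_* names are hypothetical stand-ins, not the driver's functions:

/* Stubs standing in for real acquire/release pairs. */
static int demo_acquire_a(void) { return 0; }
static void demo_release_a(void) { }
static int demo_acquire_b(void) { return 0; }
static void demo_release_b(void) { }
static int demo_acquire_c(void) { return 0; }

static int demo_probe(void)
{
	int ret;

	ret = demo_acquire_a();
	if (ret)
		return ret;

	ret = demo_acquire_b();
	if (ret)
		goto err_release_a;

	ret = demo_acquire_c();
	if (ret)
		goto err_release_b;

	return 0;

err_release_b:
	demo_release_b();
err_release_a:
	demo_release_a();
	return ret;
}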
2436 struct cfe_device *cfe = platform_get_drvdata(pdev); in cfe_remove() local
2438 debugfs_remove(cfe->debugfs); in cfe_remove()
2440 v4l2_async_nf_unregister(&cfe->notifier); in cfe_remove()
2441 v4l2_async_nf_cleanup(&cfe->notifier); in cfe_remove()
2443 media_device_unregister(&cfe->mdev); in cfe_remove()
2444 cfe_unregister_nodes(cfe); in cfe_remove()
2446 pisp_fe_uninit(&cfe->fe); in cfe_remove()
2447 csi2_uninit(&cfe->csi2); in cfe_remove()
2449 pm_runtime_disable(&pdev->dev); in cfe_remove()
2451 v4l2_device_unregister(&cfe->v4l2_dev); in cfe_remove()
2453 cfe_put(cfe); in cfe_remove()
2459 struct cfe_device *cfe = platform_get_drvdata(pdev); in cfe_runtime_suspend() local
2461 clk_disable_unprepare(cfe->clk); in cfe_runtime_suspend()
2469 struct cfe_device *cfe = platform_get_drvdata(pdev); in cfe_runtime_resume() local
2472 ret = clk_prepare_enable(cfe->clk); in cfe_runtime_resume()
2488 { .compatible = "raspberrypi,rp1-cfe" },
2507 MODULE_DESCRIPTION("Raspberry Pi RP1 Camera Front End driver");