1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for STM32 Digital Camera Memory Interface Pixel Processor
4  *
5  * Copyright (C) STMicroelectronics SA 2023
6  * Authors: Hugues Fruchet <[email protected]>
7  *          Alain Volmat <[email protected]>
8  *          for STMicroelectronics.
9  */
10 
11 #include <linux/iopoll.h>
12 #include <linux/pm_runtime.h>
13 #include <media/v4l2-ioctl.h>
14 #include <media/v4l2-mc.h>
15 #include <media/videobuf2-core.h>
16 #include <media/videobuf2-dma-contig.h>
17 
18 #include "dcmipp-common.h"
19 
/* DCMIPP register offsets and bit definitions (Pipe0, the "dump" pipe) */
#define DCMIPP_PRSR		0x1f8
/* Common interrupt enable register and the Pipe0 sources used here */
#define DCMIPP_CMIER		0x3f0
#define DCMIPP_CMIER_P0FRAMEIE	BIT(9)
#define DCMIPP_CMIER_P0VSYNCIE	BIT(10)
#define DCMIPP_CMIER_P0OVRIE	BIT(15)
/* All Pipe0 interrupt sources handled by this driver */
#define DCMIPP_CMIER_P0ALL	(DCMIPP_CMIER_P0VSYNCIE |\
				 DCMIPP_CMIER_P0FRAMEIE |\
				 DCMIPP_CMIER_P0OVRIE)
#define DCMIPP_CMSR1		0x3f4
/* Common interrupt status register 2 and its Pipe0 flags */
#define DCMIPP_CMSR2		0x3f8
#define DCMIPP_CMSR2_P0FRAMEF	BIT(9)
#define DCMIPP_CMSR2_P0VSYNCF	BIT(10)
#define DCMIPP_CMSR2_P0OVRF	BIT(15)
/* Common interrupt flag clear register */
#define DCMIPP_CMFCR		0x3fc
#define DCMIPP_P0FSCR		0x404
#define DCMIPP_P0FSCR_PIPEN	BIT(31)
#define DCMIPP_P0FCTCR		0x500
#define DCMIPP_P0FCTCR_CPTREQ	BIT(3)
/* Captured byte counter, read on FRAMEEND */
#define DCMIPP_P0DCCNTR		0x5b0
/* Data limit register; limit is in 32-bit words (see dcmipp_start_capture()) */
#define DCMIPP_P0DCLMTR		0x5b4
#define DCMIPP_P0DCLMTR_ENABLE	BIT(31)
#define DCMIPP_P0DCLMTR_LIMIT_MASK	GENMASK(23, 0)
/* Destination buffer address register (shadowed, latched on VSYNC) */
#define DCMIPP_P0PPM0AR1	0x5c4
#define DCMIPP_P0SR		0x5f8
#define DCMIPP_P0SR_CPTACT	BIT(23)
45 
/* Mapping between a media bus code and the corresponding V4L2 pixel format */
struct dcmipp_bytecap_pix_map {
	unsigned int code;	/* MEDIA_BUS_FMT_* code */
	u32 pixelformat;	/* V4L2_PIX_FMT_* fourcc */
};
50 
/* Helper to build one dcmipp_bytecap_pix_map entry from short names */
#define PIXMAP_MBUS_PFMT(mbus, fmt)			\
	{						\
		.code = MEDIA_BUS_FMT_##mbus,		\
		.pixelformat = V4L2_PIX_FMT_##fmt	\
	}
56 
/*
 * Supported bus code <-> pixelformat pairs. Both parallel (2X8) and CSI
 * (1X16) variants of a format map to the same pixelformat, hence the
 * duplicate pixelformat entries (deduplicated in enum_fmt).
 */
static const struct dcmipp_bytecap_pix_map dcmipp_bytecap_pix_map_list[] = {
	PIXMAP_MBUS_PFMT(RGB565_2X8_LE, RGB565),
	PIXMAP_MBUS_PFMT(RGB565_1X16, RGB565),
	PIXMAP_MBUS_PFMT(YUYV8_2X8, YUYV),
	PIXMAP_MBUS_PFMT(YUYV8_1X16, YUYV),
	PIXMAP_MBUS_PFMT(YVYU8_2X8, YVYU),
	PIXMAP_MBUS_PFMT(YVYU8_1X16, YVYU),
	PIXMAP_MBUS_PFMT(UYVY8_2X8, UYVY),
	PIXMAP_MBUS_PFMT(UYVY8_1X16, UYVY),
	PIXMAP_MBUS_PFMT(VYUY8_2X8, VYUY),
	PIXMAP_MBUS_PFMT(VYUY8_1X16, VYUY),
	PIXMAP_MBUS_PFMT(Y8_1X8, GREY),
	PIXMAP_MBUS_PFMT(SBGGR8_1X8, SBGGR8),
	PIXMAP_MBUS_PFMT(SGBRG8_1X8, SGBRG8),
	PIXMAP_MBUS_PFMT(SGRBG8_1X8, SGRBG8),
	PIXMAP_MBUS_PFMT(SRGGB8_1X8, SRGGB8),
	PIXMAP_MBUS_PFMT(SBGGR10_1X10, SBGGR10),
	PIXMAP_MBUS_PFMT(SGBRG10_1X10, SGBRG10),
	PIXMAP_MBUS_PFMT(SGRBG10_1X10, SGRBG10),
	PIXMAP_MBUS_PFMT(SRGGB10_1X10, SRGGB10),
	PIXMAP_MBUS_PFMT(SBGGR12_1X12, SBGGR12),
	PIXMAP_MBUS_PFMT(SGBRG12_1X12, SGBRG12),
	PIXMAP_MBUS_PFMT(SGRBG12_1X12, SGRBG12),
	PIXMAP_MBUS_PFMT(SRGGB12_1X12, SRGGB12),
	PIXMAP_MBUS_PFMT(SBGGR14_1X14, SBGGR14),
	PIXMAP_MBUS_PFMT(SGBRG14_1X14, SGBRG14),
	PIXMAP_MBUS_PFMT(SGRBG14_1X14, SGRBG14),
	PIXMAP_MBUS_PFMT(SRGGB14_1X14, SRGGB14),
	PIXMAP_MBUS_PFMT(JPEG_1X8, JPEG),
};
87 
88 static const struct dcmipp_bytecap_pix_map *
dcmipp_bytecap_pix_map_by_pixelformat(u32 pixelformat)89 dcmipp_bytecap_pix_map_by_pixelformat(u32 pixelformat)
90 {
91 	unsigned int i;
92 
93 	for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
94 		if (dcmipp_bytecap_pix_map_list[i].pixelformat == pixelformat)
95 			return &dcmipp_bytecap_pix_map_list[i];
96 	}
97 
98 	return NULL;
99 }
100 
/* Per-buffer driver state, embedding the vb2 buffer */
struct dcmipp_buf {
	struct vb2_v4l2_buffer	vb;
	bool			prepared;	/* DMA address/size cached once */
	dma_addr_t		addr;		/* plane 0 DMA address */
	size_t			size;		/* plane 0 size in bytes */
	struct list_head	list;		/* entry in vcap->buffers */
};
108 
/* Capture state machine, protected by vcap->irqlock */
enum dcmipp_state {
	DCMIPP_STOPPED = 0,
	DCMIPP_WAIT_FOR_BUFFER,	/* capture paused until a buffer is queued */
	DCMIPP_RUNNING,
};
114 
/*
 * State of the bytecap capture video device: video device, vb2 queue,
 * buffer bookkeeping and IRQ statistics.
 */
struct dcmipp_bytecap_device {
	struct dcmipp_ent_device ved;
	struct video_device vdev;
	struct device *dev;
	struct v4l2_pix_format format;	/* current capture format */
	struct vb2_queue queue;
	struct list_head buffers;	/* queued, not yet completed buffers */
	/*
	 * Protects concurrent calls of buf queue / irq handler
	 * and buffer handling related variables / lists
	 */
	spinlock_t irqlock;
	/* mutex used as vdev and queue lock */
	struct mutex lock;
	u32 sequence;			/* frame sequence counter */
	struct media_pipeline pipe;
	/* source subdev, cached at first start_streaming (IMMUTABLE link) */
	struct v4l2_subdev *s_subdev;
	u32 s_subdev_pad_nb;

	enum dcmipp_state state;

	/*
	 * DCMIPP driver is handling 2 buffers
	 * active: buffer into which DCMIPP is currently writing into
	 * next: buffer given to the DCMIPP and which will become
	 *       automatically active on next VSYNC
	 */
	struct dcmipp_buf *active, *next;

	void __iomem *regs;

	u32 cmier;	/* interrupt enable mask programmed in DCMIPP_CMIER */
	u32 cmsr2;	/* status latched by the hard IRQ handler */

	/* statistics, zeroed at start_streaming, reported at stop on error */
	struct {
		u32 errors;
		u32 limit;
		u32 overrun;
		u32 buffers;
		u32 vsync;
		u32 frame;
		u32 it;
		u32 underrun;
		u32 nactive;
	} count;
};
161 
/* Default capture format: RGB565 at the common default resolution */
static const struct v4l2_pix_format fmt_default = {
	.width = DCMIPP_FMT_WIDTH_DEFAULT,
	.height = DCMIPP_FMT_HEIGHT_DEFAULT,
	.pixelformat = V4L2_PIX_FMT_RGB565,
	.field = V4L2_FIELD_NONE,
	.bytesperline = DCMIPP_FMT_WIDTH_DEFAULT * 2,	/* 2 bytes/pixel */
	.sizeimage = DCMIPP_FMT_WIDTH_DEFAULT * DCMIPP_FMT_HEIGHT_DEFAULT * 2,
	.colorspace = DCMIPP_COLORSPACE_DEFAULT,
	.ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT,
	.quantization = DCMIPP_QUANTIZATION_DEFAULT,
	.xfer_func = DCMIPP_XFER_FUNC_DEFAULT,
};
174 
dcmipp_bytecap_querycap(struct file * file,void * priv,struct v4l2_capability * cap)175 static int dcmipp_bytecap_querycap(struct file *file, void *priv,
176 				   struct v4l2_capability *cap)
177 {
178 	strscpy(cap->driver, DCMIPP_PDEV_NAME, sizeof(cap->driver));
179 	strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
180 
181 	return 0;
182 }
183 
dcmipp_bytecap_g_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)184 static int dcmipp_bytecap_g_fmt_vid_cap(struct file *file, void *priv,
185 					struct v4l2_format *f)
186 {
187 	struct dcmipp_bytecap_device *vcap = video_drvdata(file);
188 
189 	f->fmt.pix = vcap->format;
190 
191 	return 0;
192 }
193 
dcmipp_bytecap_try_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)194 static int dcmipp_bytecap_try_fmt_vid_cap(struct file *file, void *priv,
195 					  struct v4l2_format *f)
196 {
197 	struct dcmipp_bytecap_device *vcap = video_drvdata(file);
198 	struct v4l2_pix_format *format = &f->fmt.pix;
199 	const struct dcmipp_bytecap_pix_map *vpix;
200 	u32 in_w, in_h;
201 
202 	/* Don't accept a pixelformat that is not on the table */
203 	vpix = dcmipp_bytecap_pix_map_by_pixelformat(format->pixelformat);
204 	if (!vpix)
205 		format->pixelformat = fmt_default.pixelformat;
206 
207 	/* Adjust width & height */
208 	in_w = format->width;
209 	in_h = format->height;
210 	v4l_bound_align_image(&format->width, DCMIPP_FRAME_MIN_WIDTH,
211 			      DCMIPP_FRAME_MAX_WIDTH, 0, &format->height,
212 			      DCMIPP_FRAME_MIN_HEIGHT, DCMIPP_FRAME_MAX_HEIGHT,
213 			      0, 0);
214 	if (format->width != in_w || format->height != in_h)
215 		dev_dbg(vcap->dev, "resolution updated: %dx%d -> %dx%d\n",
216 			in_w, in_h, format->width, format->height);
217 
218 	if (format->pixelformat == V4L2_PIX_FMT_JPEG) {
219 		format->bytesperline = format->width;
220 		format->sizeimage = format->bytesperline * format->height;
221 	} else {
222 		v4l2_fill_pixfmt(format, format->pixelformat,
223 				 format->width, format->height);
224 	}
225 
226 	if (format->field == V4L2_FIELD_ANY)
227 		format->field = fmt_default.field;
228 
229 	dcmipp_colorimetry_clamp(format);
230 
231 	return 0;
232 }
233 
dcmipp_bytecap_s_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)234 static int dcmipp_bytecap_s_fmt_vid_cap(struct file *file, void *priv,
235 					struct v4l2_format *f)
236 {
237 	struct dcmipp_bytecap_device *vcap = video_drvdata(file);
238 	int ret;
239 
240 	/* Do not change the format while stream is on */
241 	if (vb2_is_busy(&vcap->queue))
242 		return -EBUSY;
243 
244 	ret = dcmipp_bytecap_try_fmt_vid_cap(file, priv, f);
245 	if (ret)
246 		return ret;
247 
248 	dev_dbg(vcap->dev, "%s: format update: old:%ux%u (0x%p4cc, %u, %u, %u, %u) new:%ux%d (0x%p4cc, %u, %u, %u, %u)\n",
249 		vcap->vdev.name,
250 		/* old */
251 		vcap->format.width, vcap->format.height,
252 		&vcap->format.pixelformat, vcap->format.colorspace,
253 		vcap->format.quantization, vcap->format.xfer_func,
254 		vcap->format.ycbcr_enc,
255 		/* new */
256 		f->fmt.pix.width, f->fmt.pix.height,
257 		&f->fmt.pix.pixelformat, f->fmt.pix.colorspace,
258 		f->fmt.pix.quantization, f->fmt.pix.xfer_func,
259 		f->fmt.pix.ycbcr_enc);
260 
261 	vcap->format = f->fmt.pix;
262 
263 	return 0;
264 }
265 
/*
 * VIDIOC_ENUM_FMT: enumerate supported pixelformats, optionally filtered
 * by f->mbus_code, skipping consecutive duplicate pixelformats.
 */
static int dcmipp_bytecap_enum_fmt_vid_cap(struct file *file, void *priv,
					   struct v4l2_fmtdesc *f)
{
	const struct dcmipp_bytecap_pix_map *vpix;
	unsigned int index = f->index;
	unsigned int i, prev_pixelformat = 0;

	/*
	 * List up all formats (or only ones matching f->mbus_code), taking
	 * care of removing duplicated entries (due to support of both
	 * parallel & csi 16 bits formats
	 */
	for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
		vpix = &dcmipp_bytecap_pix_map_list[i];
		/* Skip formats not matching requested mbus code */
		if (f->mbus_code && vpix->code != f->mbus_code)
			continue;

		/*
		 * Skip duplicated pixelformat; NOTE: this only catches
		 * duplicates that are adjacent in the enumeration, which
		 * holds for the table as laid out (2X8/1X16 pairs).
		 */
		if (vpix->pixelformat == prev_pixelformat)
			continue;

		prev_pixelformat = vpix->pixelformat;

		/* Stop once the requested index is reached */
		if (index == 0)
			break;

		index--;
	}

	/* Loop ran off the end of the table: index out of range */
	if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
		return -EINVAL;

	f->pixelformat = vpix->pixelformat;

	return 0;
}
303 
dcmipp_bytecap_enum_framesizes(struct file * file,void * fh,struct v4l2_frmsizeenum * fsize)304 static int dcmipp_bytecap_enum_framesizes(struct file *file, void *fh,
305 					  struct v4l2_frmsizeenum *fsize)
306 {
307 	const struct dcmipp_bytecap_pix_map *vpix;
308 
309 	if (fsize->index)
310 		return -EINVAL;
311 
312 	/* Only accept code in the pix map table */
313 	vpix = dcmipp_bytecap_pix_map_by_pixelformat(fsize->pixel_format);
314 	if (!vpix)
315 		return -EINVAL;
316 
317 	fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
318 	fsize->stepwise.min_width = DCMIPP_FRAME_MIN_WIDTH;
319 	fsize->stepwise.max_width = DCMIPP_FRAME_MAX_WIDTH;
320 	fsize->stepwise.min_height = DCMIPP_FRAME_MIN_HEIGHT;
321 	fsize->stepwise.max_height = DCMIPP_FRAME_MAX_HEIGHT;
322 	fsize->stepwise.step_width = 1;
323 	fsize->stepwise.step_height = 1;
324 
325 	return 0;
326 }
327 
/* File operations: standard vb2 helpers plus v4l2_fh open tracking */
static const struct v4l2_file_operations dcmipp_bytecap_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read           = vb2_fop_read,
	.poll		= vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap           = vb2_fop_mmap,
};
337 
/* ioctl operations: format handling above, buffer handling via vb2 */
static const struct v4l2_ioctl_ops dcmipp_bytecap_ioctl_ops = {
	.vidioc_querycap = dcmipp_bytecap_querycap,

	.vidioc_g_fmt_vid_cap = dcmipp_bytecap_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = dcmipp_bytecap_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = dcmipp_bytecap_try_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap = dcmipp_bytecap_enum_fmt_vid_cap,
	.vidioc_enum_framesizes = dcmipp_bytecap_enum_framesizes,

	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};
357 
/*
 * Program a capture of @buf: destination address, data limit, then the
 * capture request. Register order matters: address and limit must be set
 * before CPTREQ is raised.
 */
static void dcmipp_start_capture(struct dcmipp_bytecap_device *vcap,
				 struct dcmipp_buf *buf)
{
	/* Set buffer address */
	reg_write(vcap, DCMIPP_P0PPM0AR1, buf->addr);

	/* Set buffer size (hardware limit is expressed in 32-bit words) */
	reg_write(vcap, DCMIPP_P0DCLMTR, DCMIPP_P0DCLMTR_ENABLE |
		  ((buf->size / 4) & DCMIPP_P0DCLMTR_LIMIT_MASK));

	/* Capture request */
	reg_set(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);
}
371 
dcmipp_bytecap_all_buffers_done(struct dcmipp_bytecap_device * vcap,enum vb2_buffer_state state)372 static void dcmipp_bytecap_all_buffers_done(struct dcmipp_bytecap_device *vcap,
373 					    enum vb2_buffer_state state)
374 {
375 	struct dcmipp_buf *buf, *node;
376 
377 	list_for_each_entry_safe(buf, node, &vcap->buffers, list) {
378 		list_del_init(&buf->list);
379 		vb2_buffer_done(&buf->vb.vb2_buf, state);
380 	}
381 }
382 
/*
 * vb2 start_streaming: resume the device, start the media pipeline and
 * the source subdev stream, then program the hardware for the first
 * buffer and enable Pipe0 interrupts. On error all queued buffers are
 * given back to vb2 in QUEUED state.
 */
static int dcmipp_bytecap_start_streaming(struct vb2_queue *vq,
					  unsigned int count)
{
	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
	struct media_entity *entity = &vcap->vdev.entity;
	struct dcmipp_buf *buf;
	struct media_pad *pad;
	int ret;

	/* Fresh stream: reset sequence numbering and statistics */
	vcap->sequence = 0;
	memset(&vcap->count, 0, sizeof(vcap->count));

	/*
	 * Get source subdev - since link is IMMUTABLE, pointer is cached
	 * within the dcmipp_bytecap_device structure
	 */
	if (!vcap->s_subdev) {
		pad = media_pad_remote_pad_first(&vcap->vdev.entity.pads[0]);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			return -EINVAL;
		vcap->s_subdev = media_entity_to_v4l2_subdev(pad->entity);
		vcap->s_subdev_pad_nb = pad->index;
	}

	/* Power up / clock the device before touching registers */
	ret = pm_runtime_resume_and_get(vcap->dev);
	if (ret < 0) {
		dev_err(vcap->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
			__func__, ret);
		goto err_buffer_done;
	}

	ret = media_pipeline_start(entity->pads, &vcap->pipe);
	if (ret) {
		dev_dbg(vcap->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n",
			__func__, ret);
		goto err_pm_put;
	}

	/* Start the stream on the source subdev pad */
	ret = v4l2_subdev_enable_streams(vcap->s_subdev,
					 vcap->s_subdev_pad_nb, BIT_ULL(0));
	if (ret)
		goto err_media_pipeline_stop;

	/* Serialize against the IRQ handler while programming the pipe */
	spin_lock_irq(&vcap->irqlock);

	/* Enable pipe at the end of programming */
	reg_set(vcap, DCMIPP_P0FSCR, DCMIPP_P0FSCR_PIPEN);

	/*
	 * vb2 framework guarantee that we have at least 'min_queued_buffers'
	 * buffers in the list at this moment
	 */
	vcap->next = list_first_entry(&vcap->buffers, typeof(*buf), list);
	dev_dbg(vcap->dev, "Start with next [%d] %p phy=%pad\n",
		vcap->next->vb.vb2_buf.index, vcap->next, &vcap->next->addr);

	dcmipp_start_capture(vcap, vcap->next);

	/* Enable interruptions */
	vcap->cmier |= DCMIPP_CMIER_P0ALL;
	reg_set(vcap, DCMIPP_CMIER, vcap->cmier);

	vcap->state = DCMIPP_RUNNING;

	spin_unlock_irq(&vcap->irqlock);

	return 0;

err_media_pipeline_stop:
	media_pipeline_stop(entity->pads);
err_pm_put:
	pm_runtime_put(vcap->dev);
err_buffer_done:
	spin_lock_irq(&vcap->irqlock);
	/*
	 * Return all buffers to vb2 in QUEUED state.
	 * This will give ownership back to userspace
	 */
	dcmipp_bytecap_all_buffers_done(vcap, VB2_BUF_STATE_QUEUED);
	vcap->active = NULL;
	spin_unlock_irq(&vcap->irqlock);

	return ret;
}
467 
/* Dump the main status registers for debugging (dev_dbg only) */
static void dcmipp_dump_status(struct dcmipp_bytecap_device *vcap)
{
	struct device *dev = vcap->dev;

	dev_dbg(dev, "[DCMIPP_PRSR]  =%#10.8x\n", reg_read(vcap, DCMIPP_PRSR));
	dev_dbg(dev, "[DCMIPP_P0SR] =%#10.8x\n", reg_read(vcap, DCMIPP_P0SR));
	dev_dbg(dev, "[DCMIPP_P0DCCNTR]=%#10.8x\n",
		reg_read(vcap, DCMIPP_P0DCCNTR));
	dev_dbg(dev, "[DCMIPP_CMSR1] =%#10.8x\n", reg_read(vcap, DCMIPP_CMSR1));
	dev_dbg(dev, "[DCMIPP_CMSR2] =%#10.8x\n", reg_read(vcap, DCMIPP_CMSR2));
}
479 
480 /*
481  * Stop the stream engine. Any remaining buffers in the stream queue are
482  * dequeued and passed on to the vb2 framework marked as STATE_ERROR.
483  */
/*
 * Stop the stream engine. Any remaining buffers in the stream queue are
 * dequeued and passed on to the vb2 framework marked as STATE_ERROR.
 */
static void dcmipp_bytecap_stop_streaming(struct vb2_queue *vq)
{
	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
	int ret;
	u32 status;

	/* Stop the source subdev stream first */
	ret = v4l2_subdev_disable_streams(vcap->s_subdev,
					  vcap->s_subdev_pad_nb, BIT_ULL(0));
	if (ret)
		dev_warn(vcap->dev, "Failed to disable stream\n");

	/* Stop the media pipeline */
	media_pipeline_stop(vcap->vdev.entity.pads);

	/* Disable interruptions */
	reg_clear(vcap, DCMIPP_CMIER, vcap->cmier);

	/* Stop capture */
	reg_clear(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);

	/* Wait until CPTACT become 0 (on-going frame completes) */
	ret = readl_relaxed_poll_timeout(vcap->regs + DCMIPP_P0SR, status,
					 !(status & DCMIPP_P0SR_CPTACT),
					 20 * USEC_PER_MSEC,
					 1000 * USEC_PER_MSEC);
	if (ret)
		dev_warn(vcap->dev, "Timeout when stopping\n");

	/* Disable pipe */
	reg_clear(vcap, DCMIPP_P0FSCR, DCMIPP_P0FSCR_PIPEN);

	spin_lock_irq(&vcap->irqlock);

	/* Return all queued buffers to vb2 in ERROR state */
	dcmipp_bytecap_all_buffers_done(vcap, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&vcap->buffers);

	vcap->active = NULL;
	vcap->state = DCMIPP_STOPPED;

	spin_unlock_irq(&vcap->irqlock);

	dcmipp_dump_status(vcap);

	/* Balance the pm_runtime_resume_and_get() of start_streaming */
	pm_runtime_put(vcap->dev);

	/* Report accumulated error statistics, if any */
	if (vcap->count.errors)
		dev_warn(vcap->dev, "Some errors found while streaming: errors=%d (overrun=%d, limit=%d, nactive=%d), underrun=%d, buffers=%d\n",
			 vcap->count.errors, vcap->count.overrun,
			 vcap->count.limit, vcap->count.nactive,
			 vcap->count.underrun, vcap->count.buffers);
}
536 
/*
 * vb2 buf_prepare: verify the plane is large enough for the current
 * format and cache the DMA address/size on first preparation.
 */
static int dcmipp_bytecap_buf_prepare(struct vb2_buffer *vb)
{
	struct dcmipp_bytecap_device *vcap =  vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);
	unsigned long size;

	size = vcap->format.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(vcap->dev, "%s data will not fit into plane (%lu < %lu)\n",
			__func__, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);

	if (!buf->prepared) {
		/* Get memory addresses */
		buf->addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
		buf->prepared = true;

		/*
		 * NOTE(review): this overrides the payload set above with the
		 * full plane size on first prepare only — looks intentional
		 * but possibly redundant; confirm against vb2 expectations.
		 */
		vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);

		dev_dbg(vcap->dev, "Setup [%d] phy=%pad size=%zu\n",
			vb->index, &buf->addr, buf->size);
	}

	return 0;
}
568 
/*
 * vb2 buf_queue: add the buffer to the driver's list and, if capture was
 * paused for lack of buffers, restart it immediately with this buffer.
 */
static void dcmipp_bytecap_buf_queue(struct vb2_buffer *vb2_buf)
{
	struct dcmipp_bytecap_device *vcap =
		vb2_get_drv_priv(vb2_buf->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2_buf);
	struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);

	dev_dbg(vcap->dev, "Queue [%d] %p phy=%pad\n", buf->vb.vb2_buf.index,
		buf, &buf->addr);

	/* irqlock serializes against the IRQ thread's buffer handling */
	spin_lock_irq(&vcap->irqlock);
	list_add_tail(&buf->list, &vcap->buffers);

	if (vcap->state == DCMIPP_WAIT_FOR_BUFFER) {
		vcap->next = buf;
		dev_dbg(vcap->dev, "Restart with next [%d] %p phy=%pad\n",
			buf->vb.vb2_buf.index, buf, &buf->addr);

		dcmipp_start_capture(vcap, buf);

		vcap->state = DCMIPP_RUNNING;
	}

	spin_unlock_irq(&vcap->irqlock);
}
594 
dcmipp_bytecap_queue_setup(struct vb2_queue * vq,unsigned int * nbuffers,unsigned int * nplanes,unsigned int sizes[],struct device * alloc_devs[])595 static int dcmipp_bytecap_queue_setup(struct vb2_queue *vq,
596 				      unsigned int *nbuffers,
597 				      unsigned int *nplanes,
598 				      unsigned int sizes[],
599 				      struct device *alloc_devs[])
600 {
601 	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
602 	unsigned int size;
603 
604 	size = vcap->format.sizeimage;
605 
606 	/* Make sure the image size is large enough */
607 	if (*nplanes)
608 		return sizes[0] < vcap->format.sizeimage ? -EINVAL : 0;
609 
610 	*nplanes = 1;
611 	sizes[0] = vcap->format.sizeimage;
612 
613 	dev_dbg(vcap->dev, "Setup queue, count=%d, size=%d\n",
614 		*nbuffers, size);
615 
616 	return 0;
617 }
618 
dcmipp_bytecap_buf_init(struct vb2_buffer * vb)619 static int dcmipp_bytecap_buf_init(struct vb2_buffer *vb)
620 {
621 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
622 	struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);
623 
624 	INIT_LIST_HEAD(&buf->list);
625 
626 	return 0;
627 }
628 
/* vb2 queue operations for the bytecap capture device */
static const struct vb2_ops dcmipp_bytecap_qops = {
	.start_streaming	= dcmipp_bytecap_start_streaming,
	.stop_streaming		= dcmipp_bytecap_stop_streaming,
	.buf_init		= dcmipp_bytecap_buf_init,
	.buf_prepare		= dcmipp_bytecap_buf_prepare,
	.buf_queue		= dcmipp_bytecap_buf_queue,
	.queue_setup		= dcmipp_bytecap_queue_setup,
};
637 
/*
 * video_device release callback: frees everything allocated in
 * dcmipp_bytecap_ent_init() once the last reference is dropped.
 */
static void dcmipp_bytecap_release(struct video_device *vdev)
{
	struct dcmipp_bytecap_device *vcap =
		container_of(vdev, struct dcmipp_bytecap_device, vdev);

	dcmipp_pads_cleanup(vcap->ved.pads);
	mutex_destroy(&vcap->lock);

	kfree(vcap);
}
648 
/*
 * Entity teardown: unregister the video device; the memory itself is
 * freed by dcmipp_bytecap_release() via the vdev release callback.
 */
void dcmipp_bytecap_ent_release(struct dcmipp_ent_device *ved)
{
	struct dcmipp_bytecap_device *vcap =
		container_of(ved, struct dcmipp_bytecap_device, ved);

	media_entity_cleanup(ved->ent);
	vb2_video_unregister_device(&vcap->vdev);
}
657 
/*
 * Complete @buf: remove it from the driver list, stamp sequence/time,
 * set the payload to @bytesused and hand it to vb2 (DONE, or ERROR when
 * @err is non-zero). irqlock must be held.
 */
static void dcmipp_buffer_done(struct dcmipp_bytecap_device *vcap,
			       struct dcmipp_buf *buf,
			       size_t bytesused,
			       int err)
{
	struct vb2_v4l2_buffer *vbuf;

	list_del_init(&buf->list);

	vbuf = &buf->vb;

	vbuf->sequence = vcap->sequence++;
	vbuf->field = V4L2_FIELD_NONE;
	vbuf->vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&vbuf->vb2_buf, 0, bytesused);
	vb2_buffer_done(&vbuf->vb2_buf,
			err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
	dev_dbg(vcap->dev, "Done  [%d] %p phy=%pad\n", buf->vb.vb2_buf.index,
		buf, &buf->addr);
	vcap->count.buffers++;
}
679 
/*
 * Pick the buffer to be written on the next VSYNC, or pause the capture
 * when no spare buffer is available. irqlock must be held.
 */
static void
dcmipp_bytecap_set_next_frame_or_stop(struct dcmipp_bytecap_device *vcap)
{
	if (!vcap->next && list_is_singular(&vcap->buffers)) {
		/*
		 * If there is no available buffer (none or a single one in the
		 * list while two are expected), stop the capture (effective
		 * for next frame). On-going frame capture will continue until
		 * FRAME END but no further capture will be done.
		 */
		reg_clear(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);

		dev_dbg(vcap->dev, "Capture restart is deferred to next buffer queueing\n");
		vcap->next = NULL;
		vcap->state = DCMIPP_WAIT_FOR_BUFFER;
		return;
	}

	/* If we don't have buffer yet, pick the one after active */
	if (!vcap->next)
		vcap->next = list_next_entry(vcap->active, list);

	/*
	 * Set buffer address
	 * This register is shadowed and will be taken into
	 * account on next VSYNC (start of next frame)
	 */
	reg_write(vcap, DCMIPP_P0PPM0AR1, vcap->next->addr);
	dev_dbg(vcap->dev, "Write [%d] %p phy=%pad\n",
		vcap->next->vb.vb2_buf.index, vcap->next, &vcap->next->addr);
}
712 
/*
 * Handle FRAMEEND: complete the active buffer with @bytesused bytes,
 * clipping (and flagging an error) if the hardware wrote more than the
 * buffer can hold. irqlock must be held.
 */
static void dcmipp_bytecap_process_frame(struct dcmipp_bytecap_device *vcap,
					 size_t bytesused)
{
	int err = 0;
	struct dcmipp_buf *buf = vcap->active;

	/* FRAMEEND without an active buffer: account and ignore */
	if (!buf) {
		vcap->count.nactive++;
		vcap->count.errors++;
		return;
	}

	if (bytesused > buf->size) {
		dev_dbg(vcap->dev, "frame larger than expected (%zu > %zu)\n",
			bytesused, buf->size);
		/* Clip to buffer size and return buffer to V4L2 in error */
		bytesused = buf->size;
		vcap->count.limit++;
		vcap->count.errors++;
		err = -EOVERFLOW;
	}

	dcmipp_buffer_done(vcap, buf, bytesused, err);
	vcap->active = NULL;
}
739 
/*
 * Threaded IRQ handler: processes the status latched by the hard
 * handler (vcap->cmsr2). Handles, in order: overrun accounting,
 * FRAMEEND (complete the active buffer), and VSYNC (rotate next ->
 * active and program the following buffer, or pause).
 */
static irqreturn_t dcmipp_bytecap_irq_thread(int irq, void *arg)
{
	struct dcmipp_bytecap_device *vcap =
			container_of(arg, struct dcmipp_bytecap_device, ved);
	size_t bytesused = 0;
	u32 cmsr2;

	spin_lock_irq(&vcap->irqlock);

	/* Only consider sources that are currently enabled */
	cmsr2 = vcap->cmsr2 & vcap->cmier;

	/*
	 * If we have an overrun, a frame-end will probably not be generated,
	 * in that case the active buffer will be recycled as next buffer by
	 * the VSYNC handler
	 */
	if (cmsr2 & DCMIPP_CMSR2_P0OVRF) {
		vcap->count.errors++;
		vcap->count.overrun++;
	}

	if (cmsr2 & DCMIPP_CMSR2_P0FRAMEF) {
		vcap->count.frame++;

		/* Read captured buffer size */
		bytesused = reg_read(vcap, DCMIPP_P0DCCNTR);
		dcmipp_bytecap_process_frame(vcap, bytesused);
	}

	if (cmsr2 & DCMIPP_CMSR2_P0VSYNCF) {
		vcap->count.vsync++;
		/* Capture already paused: nothing to rotate or program */
		if (vcap->state == DCMIPP_WAIT_FOR_BUFFER) {
			vcap->count.underrun++;
			goto out;
		}

		/*
		 * On VSYNC, the previously set next buffer is going to become
		 * active thanks to the shadowing mechanism of the DCMIPP. In
		 * most of the cases, since a FRAMEEND has already come,
		 * pointer next is NULL since active is reset during the
		 * FRAMEEND handling. However, in case of framerate adjustment,
		 * there are more VSYNC than FRAMEEND. Thus we recycle the
		 * active (but not used) buffer and put it back into next.
		 */
		swap(vcap->active, vcap->next);
		dcmipp_bytecap_set_next_frame_or_stop(vcap);
	}

out:
	spin_unlock_irq(&vcap->irqlock);
	return IRQ_HANDLED;
}
793 
/*
 * Hard IRQ handler: latch and clear the interrupt status, then defer
 * the actual processing to the threaded handler.
 */
static irqreturn_t dcmipp_bytecap_irq_callback(int irq, void *arg)
{
	struct dcmipp_bytecap_device *vcap =
			container_of(arg, struct dcmipp_bytecap_device, ved);

	/* Store interrupt status register */
	vcap->cmsr2 = reg_read(vcap, DCMIPP_CMSR2) & vcap->cmier;
	vcap->count.it++;

	/* Clear interrupt */
	reg_write(vcap, DCMIPP_CMFCR, vcap->cmsr2);

	return IRQ_WAKE_THREAD;
}
808 
/*
 * Media link validation: the source subdev's active format must match
 * the capture node's configured width/height, and its mbus code must
 * map to the capture pixelformat via the pix map table.
 */
static int dcmipp_bytecap_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vd = media_entity_to_video_device(entity);
	struct dcmipp_bytecap_device *vcap = container_of(vd,
					struct dcmipp_bytecap_device, vdev);
	struct v4l2_subdev *source_sd =
		media_entity_to_v4l2_subdev(link->source->entity);
	struct v4l2_subdev_format source_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = link->source->index,
	};
	int ret, i;

	/* Source cannot report its format: accept the link as-is */
	ret = v4l2_subdev_call(source_sd, pad, get_fmt, NULL, &source_fmt);
	if (ret < 0)
		return 0;

	if (source_fmt.format.width != vcap->format.width ||
	    source_fmt.format.height != vcap->format.height) {
		/*
		 * NOTE(review): the message prints the capture format first
		 * and labels the source format "expected" — confirm the
		 * intended argument order matches the wording.
		 */
		dev_err(vcap->dev, "Wrong width or height %ux%u (%ux%u expected)\n",
			vcap->format.width, vcap->format.height,
			source_fmt.format.width, source_fmt.format.height);
		return -EINVAL;
	}

	/* The (pixelformat, mbus code) pair must exist in the pix map */
	for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
		if (dcmipp_bytecap_pix_map_list[i].pixelformat ==
			vcap->format.pixelformat &&
		    dcmipp_bytecap_pix_map_list[i].code ==
			source_fmt.format.code)
			break;
	}

	if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list)) {
		dev_err(vcap->dev, "mbus code 0x%x do not match capture device format (0x%x)\n",
			vcap->format.pixelformat, source_fmt.format.code);
		return -EINVAL;
	}

	return 0;
}
851 
/* Media entity operations: only link validation is needed */
static const struct media_entity_operations dcmipp_bytecap_entity_ops = {
	.link_validate = dcmipp_bytecap_link_validate,
};
855 
/*
 * Allocate and initialize the bytecap entity: pads, media entity, vb2
 * queue and video device. Returns the embedded dcmipp_ent_device or an
 * ERR_PTR on failure.
 *
 * Fix: a dma_set_mask_and_coherent() failure previously jumped past
 * media_entity_cleanup() even though media_entity_pads_init() had
 * already succeeded; the error path now unwinds the entity as well.
 */
struct dcmipp_ent_device *dcmipp_bytecap_ent_init(struct device *dev,
						  const char *entity_name,
						  struct v4l2_device *v4l2_dev,
						  void __iomem *regs)
{
	struct dcmipp_bytecap_device *vcap;
	struct video_device *vdev;
	struct vb2_queue *q;
	const unsigned long pad_flag = MEDIA_PAD_FL_SINK;
	int ret = 0;

	/* Allocate the dcmipp_bytecap_device struct */
	vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
	if (!vcap)
		return ERR_PTR(-ENOMEM);

	/* Allocate the pads */
	vcap->ved.pads = dcmipp_pads_init(1, &pad_flag);
	if (IS_ERR(vcap->ved.pads)) {
		ret = PTR_ERR(vcap->ved.pads);
		goto err_free_vcap;
	}

	/* Initialize the media entity */
	vcap->vdev.entity.name = entity_name;
	vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
	vcap->vdev.entity.ops = &dcmipp_bytecap_entity_ops;
	ret = media_entity_pads_init(&vcap->vdev.entity, 1, vcap->ved.pads);
	if (ret)
		goto err_clean_pads;

	/* Initialize the lock */
	mutex_init(&vcap->lock);

	/* Initialize the vb2 queue */
	q = &vcap->queue;
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->lock = &vcap->lock;
	q->drv_priv = vcap;
	q->buf_struct_size = sizeof(struct dcmipp_buf);
	q->ops = &dcmipp_bytecap_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_queued_buffers = 1;
	q->dev = dev;

	/* DCMIPP requires 16 bytes aligned buffers */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "Failed to set DMA mask\n");
		/* Entity is initialized: unwind it along with the mutex */
		goto err_clean_m_ent;
	}

	ret = vb2_queue_init(q);
	if (ret) {
		dev_err(dev, "%s: vb2 queue init failed (err=%d)\n",
			entity_name, ret);
		goto err_clean_m_ent;
	}

	/* Initialize buffer list and its lock */
	INIT_LIST_HEAD(&vcap->buffers);
	spin_lock_init(&vcap->irqlock);

	/* Set default frame format */
	vcap->format = fmt_default;

	/* Fill the dcmipp_ent_device struct */
	vcap->ved.ent = &vcap->vdev.entity;
	vcap->ved.handler = dcmipp_bytecap_irq_callback;
	vcap->ved.thread_fn = dcmipp_bytecap_irq_thread;
	vcap->dev = dev;
	vcap->regs = regs;

	/* Initialize the video_device struct */
	vdev = &vcap->vdev;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
			    V4L2_CAP_IO_MC;
	vdev->release = dcmipp_bytecap_release;
	vdev->fops = &dcmipp_bytecap_fops;
	vdev->ioctl_ops = &dcmipp_bytecap_ioctl_ops;
	vdev->lock = &vcap->lock;
	vdev->queue = q;
	vdev->v4l2_dev = v4l2_dev;
	strscpy(vdev->name, entity_name, sizeof(vdev->name));
	video_set_drvdata(vdev, &vcap->ved);

	/* Register the video_device with the v4l2 and the media framework */
	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		dev_err(dev, "%s: video register failed (err=%d)\n",
			vcap->vdev.name, ret);
		goto err_clean_m_ent;
	}

	return &vcap->ved;

err_clean_m_ent:
	media_entity_cleanup(&vcap->vdev.entity);
	mutex_destroy(&vcap->lock);
err_clean_pads:
	dcmipp_pads_cleanup(vcap->ved.pads);
err_free_vcap:
	kfree(vcap);

	return ERR_PTR(ret);
}
965