// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <drm/drm_managed.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_INTF_OFF(intf) (0x6A000 + 0x800 * (intf))
#define MDP_INTF_INTR_EN(intf) (MDP_INTF_OFF(intf) + 0x1c0)
#define MDP_INTF_INTR_STATUS(intf) (MDP_INTF_OFF(intf) + 0x1c4)
#define MDP_INTF_INTR_CLEAR(intf) (MDP_INTF_OFF(intf) + 0x1c8)
#define MDP_INTF_TEAR_OFF(intf) (0x6D700 + 0x100 * (intf))
#define MDP_INTF_INTR_TEAR_EN(intf) (MDP_INTF_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_INTR_TEAR_STATUS(intf) (MDP_INTF_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_INTR_TEAR_CLEAR(intf) (MDP_INTF_TEAR_OFF(intf) + 0x008)
#define MDP_AD4_OFF(ad4) (0x7C000 + 0x1000 * (ad4))
#define MDP_AD4_INTR_EN_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x41c)
#define MDP_AD4_INTR_CLEAR_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x424)
#define MDP_AD4_INTR_STATUS_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x420)
#define MDP_INTF_REV_7xxx_OFF(intf) (0x34000 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_EN(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c0)
#define MDP_INTF_REV_7xxx_INTR_STATUS(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c4)
#define MDP_INTF_REV_7xxx_INTR_CLEAR(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c8)
#define MDP_INTF_REV_7xxx_TEAR_OFF(intf) (0x34800 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_TEAR_EN(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x008)
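
/*
 * Example: for INTF1 on a legacy (<= 6.x) DPU, MDP_INTF_INTR_EN(1) expands
 * to 0x6A000 + 0x800 * 1 + 0x1c0 = 0x6A9C0, while the DPU >= 7.0 layout
 * puts the same register at MDP_INTF_REV_7xxx_INTR_EN(1) =
 * 0x34000 + 0x1000 * 1 + 0x1c0 = 0x351c0 (both relative to the MDP base).
 */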

/**
 * struct dpu_intr_reg - a DPU interrupt register set
 * @clr_off: offset to CLEAR reg
 * @en_off: offset to ENABLE reg
 * @status_off: offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set_legacy - List of DPU interrupt registers for DPU <= 6.x
 */
static const struct dpu_intr_reg dpu_intr_set_legacy[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_INTR_CLEAR(0),
		MDP_INTF_INTR_EN(0),
		MDP_INTF_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_INTR_CLEAR(1),
		MDP_INTF_INTR_EN(1),
		MDP_INTF_INTR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_INTR_CLEAR(2),
		MDP_INTF_INTR_EN(2),
		MDP_INTF_INTR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_INTR_CLEAR(3),
		MDP_INTF_INTR_EN(3),
		MDP_INTF_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_INTR_CLEAR(4),
		MDP_INTF_INTR_EN(4),
		MDP_INTF_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_INTR_CLEAR(5),
		MDP_INTF_INTR_EN(5),
		MDP_INTF_INTR_STATUS(5)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(1),
		MDP_INTF_INTR_TEAR_EN(1),
		MDP_INTF_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(2),
		MDP_INTF_INTR_TEAR_EN(2),
		MDP_INTF_INTR_TEAR_STATUS(2)
	},
	[MDP_AD4_0_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(0),
		MDP_AD4_INTR_EN_OFF(0),
		MDP_AD4_INTR_STATUS_OFF(0),
	},
	[MDP_AD4_1_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(1),
		MDP_AD4_INTR_EN_OFF(1),
		MDP_AD4_INTR_STATUS_OFF(1),
	},
};

/*
 * dpu_intr_set_7xxx - List of DPU interrupt registers for DPU >= 7.0
 */
static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(0),
		MDP_INTF_REV_7xxx_INTR_EN(0),
		MDP_INTF_REV_7xxx_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_EN(1),
		MDP_INTF_REV_7xxx_INTR_STATUS(1)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_EN(2),
		MDP_INTF_REV_7xxx_INTR_STATUS(2)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(3),
		MDP_INTF_REV_7xxx_INTR_EN(3),
		MDP_INTF_REV_7xxx_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(4),
		MDP_INTF_REV_7xxx_INTR_EN(4),
		MDP_INTF_REV_7xxx_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(5),
		MDP_INTF_REV_7xxx_INTR_EN(5),
		MDP_INTF_REV_7xxx_INTR_STATUS(5)
	},
	[MDP_INTF6_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(6),
		MDP_INTF_REV_7xxx_INTR_EN(6),
		MDP_INTF_REV_7xxx_INTR_STATUS(6)
	},
	[MDP_INTF7_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(7),
		MDP_INTF_REV_7xxx_INTR_EN(7),
		MDP_INTF_REV_7xxx_INTR_STATUS(7)
	},
	[MDP_INTF8_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(8),
		MDP_INTF_REV_7xxx_INTR_EN(8),
		MDP_INTF_REV_7xxx_INTR_STATUS(8)
	},
};

#define DPU_IRQ_MASK(irq_idx) (BIT(DPU_IRQ_BIT(irq_idx)))
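
/*
 * irq_idx is a 1-based index packing a register index and a bit position:
 * DPU_IRQ_IDX(reg_idx, offset) (see dpu_hw_interrupts.h) builds it, and
 * DPU_IRQ_REG() / DPU_IRQ_BIT() recover the two halves. Index 0 is reserved
 * as "no interrupt", which is why dpu_core_irq_is_valid() rejects it and
 * dpu_core_irq_get_entry() subtracts one when indexing irq_tbl.
 */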

static inline bool dpu_core_irq_is_valid(unsigned int irq_idx)
{
	return irq_idx && irq_idx <= DPU_NUM_IRQS;
}

static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
							       unsigned int irq_idx)
{
	return &intr->irq_tbl[irq_idx - 1];
}

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms: Pointer to DPU's KMS structure
 * @irq_idx: interrupt index
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int irq_idx)
{
	struct dpu_hw_intr_entry *irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);

	VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	if (!irq_entry->cb) {
		DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return;
	}

	atomic_inc(&irq_entry->count);

	/* Perform registered function callback */
	irq_entry->cb(irq_entry->arg);
}

/**
 * dpu_core_irq - core IRQ handler
 * @kms: MSM KMS handle
 * @return: interrupt handling status
 */
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < MDP_INTR_MAX; reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
				      irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Search through matching intr status.
		 */
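		/*
		 * ffs() returns the 1-based position of the least
		 * significant set bit, or 0 once irq_status is empty,
		 * hence the "bit - 1" below.
		 */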
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * Once the callback finishes, clear the handled bit
			 * in irq_status; the search stops when irq_status is
			 * fully cleared.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}

static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr,
					 unsigned int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n",
		       DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &intr->intr_set[reg_idx];

	/* Is this interrupt register supported on the platform? */
	if (WARN_ON(!reg->en_off))
		return -EINVAL;

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enable interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ=[%d, %d] %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
		 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
		 DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr,
					  unsigned int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n",
		       DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &intr->intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ=[%d, %d] %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
		 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
		 DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < MDP_INTR_MAX; i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
				      intr->intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();
}

static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < MDP_INTR_MAX; i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
				      intr->intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();
}

/**
 * dpu_core_irq_read - IRQ helper function for reading IRQ status
 * @dpu_kms: DPU handle
 * @irq_idx: irq index
 * @return: non-zero if the irq was pending (and has now been cleared),
 *	    zero otherwise
 */
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
		      unsigned int irq_idx)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
				   intr->intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status)
		DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
			      intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

/**
 * dpu_hw_intr_init() - Initializes the interrupts hw object
 * @dev: Corresponding device for devres management
 * @addr: mapped register io address of MDP
 * @m: pointer to MDSS catalog data
 */
struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
				     void __iomem *addr,
				     const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;
	unsigned int i;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = drmm_kzalloc(dev, sizeof(*intr), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	if (m->mdss_ver->core_major_ver >= 7)
		intr->intr_set = dpu_intr_set_7xxx;
	else
		intr->intr_set = dpu_intr_set_legacy;

	intr->hw.blk_addr = addr + m->mdp[0].base;

	intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
			 BIT(MDP_SSPP_TOP0_INTR2) |
			 BIT(MDP_SSPP_TOP0_HIST_INTR);
	for (i = 0; i < m->intf_count; i++) {
		const struct dpu_intf_cfg *intf = &m->intf[i];

		if (intf->type == INTF_NONE)
			continue;

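		/*
		 * MDP_INTFn_INTR() (from dpu_hw_interrupts.h) maps an
		 * interface id to the matching MDP_INTF<n>_INTR register
		 * index for this interface's interrupt block.
		 */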
		intr->irq_mask |= BIT(MDP_INTFn_INTR(intf->id));

		if (intf->intr_tear_rd_ptr)
			intr->irq_mask |= BIT(DPU_IRQ_REG(intf->intr_tear_rd_ptr));
	}

	spin_lock_init(&intr->irq_lock);

	return intr;
}

/**
 * dpu_core_irq_register_callback - For registering callback function on IRQ
 * interrupt
 * @dpu_kms: DPU handle
 * @irq_idx: irq index
 * @irq_cb: IRQ callback function.
 * @irq_arg: IRQ callback argument.
 * @return: 0 for successful registration, otherwise failure
 *
 * Only a single callback may be registered per interrupt; if one is
 * already present, -EBUSY is returned.
 */
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
				   unsigned int irq_idx,
				   void (*irq_cb)(void *arg),
				   void *irq_arg)
{
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int ret;

	if (!irq_cb) {
		DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	if (!dpu_core_irq_is_valid(irq_idx)) {
		DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
		return -EINVAL;
	}

	VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
	     DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);

	irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
	if (unlikely(WARN_ON(irq_entry->cb))) {
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		return -EBUSY;
	}

	trace_dpu_core_irq_register_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
	irq_entry->arg = irq_arg;
	irq_entry->cb = irq_cb;

	ret = dpu_hw_intr_enable_irq_locked(
			dpu_kms->hw_intr,
			irq_idx);
	if (ret)
		DPU_ERROR("Failed to enable IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_register_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	return 0;
}
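
/*
 * A minimal usage sketch (hypothetical caller and callback names; real
 * users live in the encoder code): register a callback for an interrupt,
 * then drop it once the event is no longer of interest.
 *
 *	static void demo_cb(void *arg)
 *	{
 *		struct dpu_kms *dpu_kms = arg;	// hypothetical argument
 *	}
 *
 *	ret = dpu_core_irq_register_callback(dpu_kms, irq_idx,
 *					     demo_cb, dpu_kms);
 *	if (!ret) {
 *		// ... wait for / handle the event ...
 *		dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
 *	}
 */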

/**
 * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
 * interrupt
 * @dpu_kms: DPU handle
 * @irq_idx: irq index
 * @return: 0 for successful unregistration, otherwise failure
 *
 * Disables the interrupt and clears the callback registered for it.
 */
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms,
				     unsigned int irq_idx)
{
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int ret;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		DPU_ERROR("invalid IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
	     DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
	if (ret)
		DPU_ERROR("Failed to disable IRQ=[%d, %d]: %d\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), ret);

	irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
	irq_entry->cb = NULL;
	irq_entry->arg = NULL;

	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_unregister_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int i, irq_count;
	void *cb;

	for (i = 1; i <= DPU_NUM_IRQS; i++) {
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		irq_count = atomic_read(&irq_entry->count);
		cb = irq_entry->cb;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb)
			seq_printf(s, "IRQ=[%d, %d] count:%d cb:%ps\n",
				   DPU_IRQ_REG(i), DPU_IRQ_BIT(i), irq_count, cb);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

/**
 * dpu_debugfs_core_irq_init - register core irq debugfs
 * @dpu_kms: pointer to kms
 * @parent: debugfs directory root
 */
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
			       struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
			    &dpu_debugfs_core_irq_fops);
}
#endif

/**
 * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
 * @kms: MSM KMS handle
 * @return: none
 */
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr_entry *irq_entry;
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	for (i = 1; i <= DPU_NUM_IRQS; i++) {
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		atomic_set(&irq_entry->count, 0);
	}
}

/**
 * dpu_core_irq_uninstall - uninstall core IRQ handler
 * @kms: MSM KMS handle
 * @return: none
 */
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr_entry *irq_entry;
	int i;

	if (!dpu_kms->hw_intr)
		return;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 1; i <= DPU_NUM_IRQS; i++) {
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		if (irq_entry->cb)
			DPU_ERROR("IRQ=[%d, %d] still enabled/registered\n",
				  DPU_IRQ_REG(i), DPU_IRQ_BIT(i));
	}

	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}