/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>

#include "mdp5_kms.h"

#define CURSOR_WIDTH    64
#define CURSOR_HEIGHT   64

struct mdp5_crtc {
        struct drm_crtc base;
        int id;
        bool enabled;

        spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;

        /* Bits that were flushed at the last commit; used to decide
         * if a vsync has happened since the last commit.
         */
        u32 flushed_mask;

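        /* PENDING_CURSOR / PENDING_FLIP are set via atomic_or() in
         * request_pending() and consumed via atomic_xchg() in
         * mdp5_crtc_vblank_irq(), so each request is serviced exactly
         * once at the next vblank.
         */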
#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
        atomic_t pending;

        /* for unref'ing cursor bo's after scanout completes: */
        struct drm_flip_work unref_cursor_work;

        struct mdp_irq vblank;
        struct mdp_irq err;
        struct mdp_irq pp_done;

        struct completion pp_completion;

        struct {
                /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
                spinlock_t lock;

                /* current cursor being scanned out: */
                struct drm_gem_object *scanout_bo;
                uint32_t width, height;
                uint32_t x, y;
        } cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
        struct msm_drm_private *priv = crtc->dev->dev_private;
        return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        atomic_or(pending, &mdp5_crtc->pending);
        mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

static void request_pp_done_pending(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        reinit_completion(&mdp5_crtc->pp_completion);
}

static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;

        DBG("%s: flush=%08x", crtc->name, flush_mask);
        return mdp5_ctl_commit(ctl, pipeline, flush_mask);
}

/*
 * Flush updates to make sure the hw is updated to the new scanout fb,
 * so that we can safely queue an unref of the current fb (i.e. by the
 * next vblank we know the hw is done with the previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_hw_mixer *mixer, *r_mixer;
        struct drm_plane *plane;
        uint32_t flush_mask = 0;

        /* this should not happen: */
        if (WARN_ON(!mdp5_cstate->ctl))
                return 0;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                flush_mask |= mdp5_plane_get_flush(plane);
        }

        mixer = mdp5_cstate->pipeline.mixer;
        flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

        r_mixer = mdp5_cstate->pipeline.r_mixer;
        if (r_mixer)
                flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

        return crtc_flush(crtc, flush_mask);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        struct drm_device *dev = crtc->dev;
        struct drm_pending_vblank_event *event;
        unsigned long flags;

        spin_lock_irqsave(&dev->event_lock, flags);
        event = mdp5_crtc->event;
        if (event) {
                mdp5_crtc->event = NULL;
                DBG("%s: send event: %p", crtc->name, event);
                drm_crtc_send_vblank_event(crtc, event);
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);

        if (ctl && !crtc->state->enable) {
                /* set STAGE_UNUSED for all layers */
                mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
                /* XXX: What to do here? */
                /* mdp5_crtc->ctl = NULL; */
        }
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
        struct mdp5_crtc *mdp5_crtc =
                container_of(work, struct mdp5_crtc, unref_cursor_work);
        struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);

        msm_gem_put_iova(val, mdp5_kms->id);
        drm_gem_object_unreference_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        drm_crtc_cleanup(crtc);
        drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

        kfree(mdp5_crtc);
}

static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
        switch (stage) {
        case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
        case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
        case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
        case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
        case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
        case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
        case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
        default:
                return 0;
        }
}

/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
#define PIPE_LEFT       0
#define PIPE_RIGHT      1

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        const struct mdp5_cfg_hw *hw_cfg;
        struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
        const struct mdp_format *format;
        struct mdp5_hw_mixer *mixer = pipeline->mixer;
        uint32_t lm = mixer->lm;
        struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
        uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
        unsigned long flags;
        enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
        enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
        int i, plane_cnt = 0;
        bool bg_alpha_enabled = false;
        u32 mixer_op_mode = 0;
        u32 val;
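/* blender() maps a mixer stage id to the 0-based index used by the
 * per-LM blend registers (STAGE0 -> blender 0, STAGE1 -> 1, ...):
 */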
#define blender(stage)  ((stage) - STAGE0)

        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

        /* ctl could be released already when we are shutting down: */
        /* XXX: Can this happen now? */
        if (!ctl)
                goto out;

        /* Collect all plane information */
        drm_atomic_crtc_for_each_plane(plane, crtc) {
                enum mdp5_pipe right_pipe;

                pstate = to_mdp5_plane_state(plane->state);
                pstates[pstate->stage] = pstate;
                stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
                /*
                 * if we have a right mixer, stage the same pipe as we
                 * have on the left mixer
                 */
                if (r_mixer)
                        r_stage[pstate->stage][PIPE_LEFT] =
                                                mdp5_plane_pipe(plane);
                /*
                 * if we have a right pipe (i.e. the plane is composed of
                 * 2 hwpipes), then stage the right pipe on the right side
                 * of both layer mixers
                 */
                right_pipe = mdp5_plane_right_pipe(plane);
                if (right_pipe) {
                        stage[pstate->stage][PIPE_RIGHT] = right_pipe;
                        r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
                }

                plane_cnt++;
        }

        if (!pstates[STAGE_BASE]) {
                ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
                DBG("Border Color is enabled");
        } else if (plane_cnt) {
                format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

                if (format->alpha_enable)
                        bg_alpha_enabled = true;
        }

        /* Program the blend configuration for each staged plane */
        for (i = STAGE0; i <= STAGE_MAX; i++) {
                if (!pstates[i])
                        continue;

                format = to_mdp_format(
                        msm_framebuffer_format(pstates[i]->base.fb));
                plane = pstates[i]->base.plane;
                blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
                        MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
                fg_alpha = pstates[i]->alpha;
                bg_alpha = 0xFF - pstates[i]->alpha;

                if (!format->alpha_enable && bg_alpha_enabled)
                        mixer_op_mode = 0;
                else
                        mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

                DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

                if (format->alpha_enable && pstates[i]->premultiplied) {
                        blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
                                MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
                        if (fg_alpha != 0xff) {
                                bg_alpha = fg_alpha;
                                blend_op |=
                                        MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
                                        MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
                        } else {
                                blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
                        }
                } else if (format->alpha_enable) {
                        blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
                                MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
                        if (fg_alpha != 0xff) {
                                bg_alpha = fg_alpha;
                                blend_op |=
                                       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
                                       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
                                       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
                                       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
                        } else {
                                blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
                        }
                }
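                /*
                 * Illustrative summary of the three cases above (derived
                 * from the register flag names, not from documentation):
                 *   const alpha:   out = fg * fg_alpha + bg * (0xff - fg_alpha)
                 *   premultiplied: out = fg + bg * (1 - fg.pixel_alpha)
                 *   coverage:      out = fg * fg.pixel_alpha
                 *                        + bg * (1 - fg.pixel_alpha)
                 * with the MOD_ALPHA flags additionally scaling by the
                 * constant plane alpha when it is not 0xff.
                 */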

                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
                                blender(i)), blend_op);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
                                blender(i)), fg_alpha);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
                                blender(i)), bg_alpha);
                if (r_mixer) {
                        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
                                        blender(i)), blend_op);
                        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
                                        blender(i)), fg_alpha);
                        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
                                        blender(i)), bg_alpha);
                }
        }

        val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
                   val | mixer_op_mode);
        if (r_mixer) {
                val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
                           val | mixer_op_mode);
        }

        mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
                       ctl_blend_flags);
out:
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
        struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
        uint32_t lm = mixer->lm;
        u32 mixer_width, val;
        unsigned long flags;
        struct drm_display_mode *mode;

        if (WARN_ON(!crtc->state))
                return;

        mode = &crtc->state->adjusted_mode;

        DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
                        crtc->name, mode->base.id, mode->name,
                        mode->vrefresh, mode->clock,
                        mode->hdisplay, mode->hsync_start,
                        mode->hsync_end, mode->htotal,
                        mode->vdisplay, mode->vsync_start,
                        mode->vsync_end, mode->vtotal,
                        mode->type, mode->flags);

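        /*
         * In source split mode each layer mixer drives half of the mode
         * width (e.g., illustratively, a 2560-wide mode becomes two
         * 1280-wide halves), hence the division by two below.
         */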
        mixer_width = mode->hdisplay;
        if (r_mixer)
                mixer_width /= 2;

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
                        MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
                        MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

        /* Assign mixer to LEFT side in source split mode */
        val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
        val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

        if (r_mixer) {
                u32 r_lm = r_mixer->lm;

                mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
                           MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
                           MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

                /* Assign mixer to RIGHT side in source split mode */
                val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
                val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
        }

        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);

        DBG("%s", crtc->name);

        if (WARN_ON(!mdp5_crtc->enabled))
                return;

        if (mdp5_cstate->cmd_mode)
                mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

        mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
        mdp5_disable(mdp5_kms);

        mdp5_crtc->enabled = false;
}

static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);

        DBG("%s", crtc->name);

        if (WARN_ON(mdp5_crtc->enabled))
                return;

        mdp5_enable(mdp5_kms);
        mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

        if (mdp5_cstate->cmd_mode)
                mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

        mdp5_crtc->enabled = true;
}

int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
                             struct drm_crtc_state *new_crtc_state,
                             bool need_right_mixer)
{
        struct mdp5_crtc_state *mdp5_cstate =
                        to_mdp5_crtc_state(new_crtc_state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_interface *intf;
        bool new_mixer = false;

        new_mixer = !pipeline->mixer;

        if ((need_right_mixer && !pipeline->r_mixer) ||
            (!need_right_mixer && pipeline->r_mixer))
                new_mixer = true;

        if (new_mixer) {
                struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
                struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
                u32 caps;
                int ret;

                caps = MDP_LM_CAP_DISPLAY;
                if (need_right_mixer)
                        caps |= MDP_LM_CAP_PAIR;

                ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
                                        &pipeline->mixer, need_right_mixer ?
                                        &pipeline->r_mixer : NULL);
                if (ret)
                        return ret;

                mdp5_mixer_release(new_crtc_state->state, old_mixer);
                if (old_r_mixer) {
                        mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
                        if (!need_right_mixer)
                                pipeline->r_mixer = NULL;
                }
        }

        /*
         * these should have been already set up in the encoder's atomic
         * check (called by drm_atomic_helper_check_modeset)
         */
        intf = pipeline->intf;

        mdp5_cstate->err_irqmask = intf2err(intf->num);
        mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

        if ((intf->type == INTF_DSI) &&
            (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
                mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
                mdp5_cstate->cmd_mode = true;
        } else {
                mdp5_cstate->pp_done_irqmask = 0;
                mdp5_cstate->cmd_mode = false;
        }

        return 0;
}

struct plane_state {
        struct drm_plane *plane;
        struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
        struct plane_state *pa = (struct plane_state *)a;
        struct plane_state *pb = (struct plane_state *)b;
        return pa->state->zpos - pb->state->zpos;
}

/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
                struct drm_plane_state *pstate)
{
        return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
                ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
                ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}

enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
                                        struct drm_crtc_state *new_crtc_state,
                                        struct drm_plane_state *bpstate)
{
        struct mdp5_crtc_state *mdp5_cstate =
                        to_mdp5_crtc_state(new_crtc_state);

        /*
         * if we're in source split mode, it's mandatory to have
         * border out on the base stage
         */
        if (mdp5_cstate->pipeline.r_mixer)
                return STAGE0;

        /* if the bottom-most layer is not fullscreen, the base stage is
         * still needed for border out (solid color), so start at STAGE0:
         */
        if (!is_fullscreen(new_crtc_state, bpstate))
                return STAGE0;

        return STAGE_BASE;
}

static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
{
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        struct drm_device *dev = crtc->dev;
        struct plane_state pstates[STAGE_MAX + 1];
        const struct mdp5_cfg_hw *hw_cfg;
        const struct drm_plane_state *pstate;
        const struct drm_display_mode *mode = &state->adjusted_mode;
        bool cursor_plane = false;
        bool need_right_mixer = false;
        int cnt = 0, i;
        int ret;
        enum mdp_mixer_stage_id start;

        DBG("%s: check", crtc->name);

        drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
                pstates[cnt].plane = plane;
                pstates[cnt].state = to_mdp5_plane_state(pstate);

                /*
                 * if any plane on this crtc uses 2 hwpipes, then we need
                 * the crtc to have a right hwmixer.
                 */
                if (pstates[cnt].state->r_hwpipe)
                        need_right_mixer = true;
                cnt++;

                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        cursor_plane = true;
        }

        /* bail out early if there aren't any planes */
        if (!cnt)
                return 0;

        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

        /*
         * we need a right hwmixer if the mode's width is greater than a single
         * LM's max width
         */
        if (mode->hdisplay > hw_cfg->lm.max_width)
                need_right_mixer = true;

        ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
        if (ret) {
                dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
                return ret;
        }

        /* assign a stage based on sorted zpos property */
        sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

        /* trigger a warning if cursor isn't the highest zorder */
        WARN_ON(cursor_plane &&
                (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

        start = get_start_stage(crtc, state, &pstates[0].state->base);

        /* verify that there are not too many planes attached to crtc
         * and that we don't have conflicting mixer stages:
         */
        if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
                dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
                        cnt, start);
                return -EINVAL;
        }

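        /*
         * Assign stages in zpos order, e.g. with three planes sorted by
         * zpos and start == STAGE0 they land on STAGE0..STAGE2; a cursor
         * plane is always forced to the topmost stage (lm.nb_stages).
         */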
        for (i = 0; i < cnt; i++) {
                if (cursor_plane && (i == (cnt - 1)))
                        pstates[i].state->stage = hw_cfg->lm.nb_stages;
                else
                        pstates[i].state->stage = start + i;
                DBG("%s: assign pipe %s on stage=%d", crtc->name,
                                pstates[i].plane->name,
                                pstates[i].state->stage);
        }

        return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
                                   struct drm_crtc_state *old_crtc_state)
{
        DBG("%s: begin", crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
                                   struct drm_crtc_state *old_crtc_state)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        DBG("%s: event: %p", crtc->name, crtc->state->event);

        WARN_ON(mdp5_crtc->event);

        spin_lock_irqsave(&dev->event_lock, flags);
        mdp5_crtc->event = crtc->state->event;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        /*
         * If no CTL has been allocated in mdp5_crtc_atomic_check(),
         * it means we are trying to flush a CRTC whose state is disabled:
         * nothing else needs to be done.
         */
        /* XXX: Can this happen now? */
        if (unlikely(!mdp5_cstate->ctl))
                return;

        blend_setup(crtc);

        /* The PP_DONE irq is only used by command mode for now.
         * It is better to request pending before the FLUSH and START
         * triggers, to make sure no pp_done irq is missed.
         * This is safe because no pp_done will happen before the SW
         * trigger in command mode.
         */
        if (mdp5_cstate->cmd_mode)
                request_pp_done_pending(crtc);

        mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

        /* XXX are we leaking out state here? */
        mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
        mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
        mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

        request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        uint32_t xres = crtc->mode.hdisplay;
        uint32_t yres = crtc->mode.vdisplay;

        /*
         * The cursor Region Of Interest (ROI) is the part of the cursor
         * buffer that is read and rendered. The ROI is determined by the
         * visibility of the cursor point. By default the cursor point is
         * at the top left of the cursor image, unless specified otherwise
         * via the hotspot feature.
         *
         * If the cursor point reaches the right (xres - x < cursor.width)
         * or bottom (yres - y < cursor.height) boundary of the screen,
         * then the ROI width and height need to be re-evaluated to crop
         * the cursor image accordingly:
         * (xres - x) becomes the new cursor width when x > (xres - cursor.width)
         * (yres - y) becomes the new cursor height when y > (yres - cursor.height)
         */
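        /*
         * Illustrative example (numbers not from the code): for a
         * 1920x1080 mode with a 64x64 cursor at x=1900, y=500, the ROI
         * becomes 20x64 (xres - x = 20), so only the visible left strip
         * of the cursor image is fetched.
         */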
        *roi_w = min(mdp5_crtc->cursor.width, xres -
                        mdp5_crtc->cursor.x);
        *roi_h = min(mdp5_crtc->cursor.height, yres -
                        mdp5_crtc->cursor.y);
}

static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
                struct drm_file *file, uint32_t handle,
                uint32_t width, uint32_t height)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct drm_device *dev = crtc->dev;
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_gem_object *cursor_bo, *old_bo = NULL;
        uint32_t blendcfg, stride;
        uint64_t cursor_addr;
        struct mdp5_ctl *ctl;
        int ret, lm;
        enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
        uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
        uint32_t roi_w, roi_h;
        bool cursor_enable = true;
        unsigned long flags;

        if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
                dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
                return -EINVAL;
        }

        ctl = mdp5_cstate->ctl;
        if (!ctl)
                return -EINVAL;

        /* don't support LM cursors when we have source split enabled */
        if (mdp5_cstate->pipeline.r_mixer)
                return -EINVAL;

        if (!handle) {
                DBG("Cursor off");
                cursor_enable = false;
                goto set_cursor;
        }

        cursor_bo = drm_gem_object_lookup(file, handle);
        if (!cursor_bo)
                return -ENOENT;

        ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
        if (ret)
                return -EINVAL;

        lm = mdp5_cstate->pipeline.mixer->lm;
        stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
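        /* ARGB8888 is 4 bytes per pixel, so e.g. a 64-pixel-wide cursor
         * ends up with a 256-byte stride. */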

        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        old_bo = mdp5_crtc->cursor.scanout_bo;

        mdp5_crtc->cursor.scanout_bo = cursor_bo;
        mdp5_crtc->cursor.width = width;
        mdp5_crtc->cursor.height = height;

        get_roi(crtc, &roi_w, &roi_h);

        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
                        MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
                        MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
                        MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
                        MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
                        MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);

        blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
        blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);

        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
        ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
        if (ret) {
                dev_err(dev->dev, "failed to %sable cursor: %d\n",
                                cursor_enable ? "en" : "dis", ret);
                goto end;
        }

        crtc_flush(crtc, flush_mask);

end:
        if (old_bo) {
                drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
                /* enable vblank to complete cursor work: */
                request_pending(crtc, PENDING_CURSOR);
        }
        return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
        uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
        uint32_t roi_w;
        uint32_t roi_h;
        unsigned long flags;

        /* don't support LM cursors when we have source split enabled */
        if (mdp5_cstate->pipeline.r_mixer)
                return -EINVAL;

        /* In case the CRTC is disabled, just drop the cursor update */
        if (unlikely(!crtc->state->enable))
                return 0;

        mdp5_crtc->cursor.x = x = max(x, 0);
        mdp5_crtc->cursor.y = y = max(y, 0);

        get_roi(crtc, &roi_w, &roi_h);

        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
                        MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
                        MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
                        MDP5_LM_CURSOR_START_XY_Y_START(y) |
                        MDP5_LM_CURSOR_START_XY_X_START(x));
        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

        crtc_flush(crtc, flush_mask);

        return 0;
}

static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
                             const struct drm_crtc_state *state)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

        if (WARN_ON(!pipeline))
                return;

        drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
                        pipeline->mixer->name : "(null)");

        if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
                drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
                           pipeline->r_mixer->name : "(null)");
}

static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate;

        if (crtc->state) {
                __drm_atomic_helper_crtc_destroy_state(crtc->state);
                kfree(to_mdp5_crtc_state(crtc->state));
        }

        mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

        if (mdp5_cstate) {
                mdp5_cstate->base.crtc = crtc;
                crtc->state = &mdp5_cstate->base;
        }
}

static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate;

        if (WARN_ON(!crtc->state))
                return NULL;

        mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
                              sizeof(*mdp5_cstate), GFP_KERNEL);
        if (!mdp5_cstate)
                return NULL;

        __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

        return &mdp5_cstate->base;
}

static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

        __drm_atomic_helper_crtc_destroy_state(state);

        kfree(mdp5_cstate);
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = mdp5_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .set_property = drm_atomic_helper_crtc_set_property,
        .reset = mdp5_crtc_reset,
        .atomic_duplicate_state = mdp5_crtc_duplicate_state,
        .atomic_destroy_state = mdp5_crtc_destroy_state,
        .cursor_set = mdp5_crtc_cursor_set,
        .cursor_move = mdp5_crtc_cursor_move,
        .atomic_print_state = mdp5_crtc_atomic_print_state,
};

static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = mdp5_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .set_property = drm_atomic_helper_crtc_set_property,
        .reset = mdp5_crtc_reset,
        .atomic_duplicate_state = mdp5_crtc_duplicate_state,
        .atomic_destroy_state = mdp5_crtc_destroy_state,
        .atomic_print_state = mdp5_crtc_atomic_print_state,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
        .mode_set_nofb = mdp5_crtc_mode_set_nofb,
        .disable = mdp5_crtc_disable,
        .enable = mdp5_crtc_enable,
        .atomic_check = mdp5_crtc_atomic_check,
        .atomic_begin = mdp5_crtc_atomic_begin,
        .atomic_flush = mdp5_crtc_atomic_flush,
};

static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
        struct drm_crtc *crtc = &mdp5_crtc->base;
        struct msm_drm_private *priv = crtc->dev->dev_private;
        unsigned pending;

        mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

        pending = atomic_xchg(&mdp5_crtc->pending, 0);

        if (pending & PENDING_FLIP) {
                complete_flip(crtc, NULL);
        }

        if (pending & PENDING_CURSOR)
                drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

        DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
                                                                pp_done);

        complete(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        int ret;

        ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
                                                msecs_to_jiffies(50));
        if (ret == 0)
                dev_warn(dev->dev, "pp done time out, lm=%d\n",
                         mdp5_cstate->pipeline.mixer->lm);
}

static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        int ret;

        /* Should not call this function if crtc is disabled. */
        if (!ctl)
                return;

        ret = drm_crtc_vblank_get(crtc);
        if (ret)
                return;

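        /*
         * flushed_mask holds the CTL FLUSH bits written at the last
         * commit; hw clears them once the double-buffered registers are
         * latched, so wait (with a timeout) until none of our bits are
         * still reported pending by mdp5_ctl_get_commit_status().
         */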
        ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
                ((mdp5_ctl_get_commit_status(ctl) &
                mdp5_crtc->flushed_mask) == 0),
                msecs_to_jiffies(50));
        if (ret <= 0)
                dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

        mdp5_crtc->flushed_mask = 0;

        drm_crtc_vblank_put(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);

        /* should this be done elsewhere? */
        mdp_irq_update(&mdp5_kms->base);

        mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

        return mdp5_cstate->ctl;
}

struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate;

        if (WARN_ON(!crtc))
                return ERR_PTR(-EINVAL);

        mdp5_cstate = to_mdp5_crtc_state(crtc->state);

        return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
                ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}

struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate;

        if (WARN_ON(!crtc))
                return ERR_PTR(-EINVAL);

        mdp5_cstate = to_mdp5_crtc_state(crtc->state);

        return &mdp5_cstate->pipeline;
}

void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

        if (mdp5_cstate->cmd_mode)
                mdp5_crtc_wait_for_pp_done(crtc);
        else
                mdp5_crtc_wait_for_flush_done(crtc);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
                                struct drm_plane *plane,
                                struct drm_plane *cursor_plane, int id)
{
        struct drm_crtc *crtc = NULL;
        struct mdp5_crtc *mdp5_crtc;

        mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
        if (!mdp5_crtc)
                return ERR_PTR(-ENOMEM);

        crtc = &mdp5_crtc->base;

        mdp5_crtc->id = id;

        spin_lock_init(&mdp5_crtc->lm_lock);
        spin_lock_init(&mdp5_crtc->cursor.lock);
        init_completion(&mdp5_crtc->pp_completion);

        mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
        mdp5_crtc->err.irq = mdp5_crtc_err_irq;
        mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

        if (cursor_plane)
                drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
                                          &mdp5_crtc_no_lm_cursor_funcs, NULL);
        else
                drm_crtc_init_with_planes(dev, crtc, plane, NULL,
                                          &mdp5_crtc_funcs, NULL);

        drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
                        "unref cursor", unref_cursor_worker);

        drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
        plane->crtc = crtc;

        return crtc;
}