drivers/gpu/drm/vc4/vc4_kms.c

/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include "drm_crtc.h"
#include "drm_atomic.h"
#include "drm_atomic_helper.h"
#include "drm_crtc_helper.h"
#include "drm_plane_helper.h"
#include "drm_fb_cma_helper.h"
#include "vc4_drv.h"

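/* Output configuration may have changed (e.g. on hotplug); forward the
 * event to the CMA fbdev helper so the fbdev framebuffer can be
 * reconfigured for the new output state.
 */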
static void vc4_output_poll_changed(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        if (vc4->fbdev)
                drm_fbdev_cma_hotplug_event(vc4->fbdev);
}

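/* Tracks one in-flight atomic commit: the state being applied, plus the
 * seqno callback used to defer completion until rendering to the new
 * framebuffers has finished.
 */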
struct vc4_commit {
        struct drm_device *dev;
        struct drm_atomic_state *state;
        struct vc4_seqno_cb cb;
};

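/* Applies the atomic state to the hardware (disables, plane updates,
 * enables), waits for vblank so scanout is done with the old display
 * lists, then frees the state and releases the modeset semaphore.
 */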
static void
vc4_atomic_complete_commit(struct vc4_commit *c)
{
        struct drm_atomic_state *state = c->state;
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        drm_atomic_helper_commit_modeset_disables(dev, state);

        drm_atomic_helper_commit_planes(dev, state, false);

        drm_atomic_helper_commit_modeset_enables(dev, state);

        /* Make sure that drm_atomic_helper_wait_for_vblanks()
         * actually waits for vblank.  If we're doing a full atomic
         * modeset (as opposed to a vc4_update_plane() short circuit),
         * then we need to wait for scanout to be done with our
         * display lists before we free them and potentially reallocate
         * and overwrite the dlist memory with a new modeset.
         */
        state->legacy_cursor_update = false;

        drm_atomic_helper_wait_for_vblanks(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        drm_atomic_state_free(state);

        up(&vc4->async_modeset);

        kfree(c);
}

static void
vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
{
        struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);

        vc4_atomic_complete_commit(c);
}

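/* Allocates and fills in the tracking structure for a new commit. */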
static struct vc4_commit *commit_init(struct drm_atomic_state *state)
{
        struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return NULL;
        c->dev = state->dev;
        c->state = state;

        return c;
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails. Nonblocking commits are completed asynchronously
 * from a seqno callback once the GPU is done with the new framebuffers.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
                             bool nonblock)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;
        int i;
        uint64_t wait_seqno = 0;
        struct vc4_commit *c;

        c = commit_init(state);
        if (!c)
                return -ENOMEM;

        /* Make sure that any outstanding modesets have finished. */
        ret = down_interruptible(&vc4->async_modeset);
        if (ret) {
                kfree(c);
                return ret;
        }

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret) {
                kfree(c);
                up(&vc4->async_modeset);
                return ret;
        }

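        /* Find the most recent GPU seqno written to any framebuffer this
         * commit flips to, so that completion of the commit can be
         * deferred until rendering to those buffers has finished.
         */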
        for (i = 0; i < dev->mode_config.num_total_plane; i++) {
                struct drm_plane *plane = state->planes[i];
                struct drm_plane_state *new_state = state->plane_states[i];

                if (!plane)
                        continue;

                if ((plane->state->fb != new_state->fb) && new_state->fb) {
                        struct drm_gem_cma_object *cma_bo =
                                drm_fb_cma_get_gem_obj(new_state->fb, 0);
                        struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);

                        wait_seqno = max(bo->seqno, wait_seqno);
                }
        }

        /*
         * This is the point of no return - everything below never fails
         * except when the hw goes bonghits, which means we can commit the
         * new state on the software side now.
         */

        drm_atomic_helper_swap_state(dev, state);

        /*
         * Everything below can be run asynchronously without the need to grab
         * any modeset locks at all under one condition: It must be guaranteed
         * that the asynchronous work has either been cancelled (if the driver
         * supports it, which at least requires that the framebuffers get
         * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
         * before the new state gets committed on the software side with
         * drm_atomic_helper_swap_state().
         *
         * This scheme allows new atomic state updates to be prepared and
         * checked in parallel to the asynchronous completion of the previous
         * update, which is important since compositors need to figure out
         * the composition of the next frame right after having submitted
         * the current layout.
         */

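        /* For a nonblocking commit, complete from the seqno callback once
         * the GPU is done with the new framebuffers; otherwise block here
         * until the seqno is reached and complete synchronously.
         */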
        if (nonblock) {
                vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
                                   vc4_atomic_complete_commit_seqno_cb);
        } else {
                vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
                vc4_atomic_complete_commit(c);
        }

        return 0;
}

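/* Mode config hooks: atomic check/commit, CMA-backed framebuffer
 * creation, and fbdev hotplug notification.
 */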
static const struct drm_mode_config_funcs vc4_mode_funcs = {
        .output_poll_changed = vc4_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = vc4_atomic_commit,
        .fb_create = drm_fb_cma_create,
};

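/* Called at driver load time to set up KMS: the modeset serialization
 * semaphore, vblank handling, mode config limits and hooks, and the
 * fbdev emulation.
 */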
int vc4_kms_load(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;

        sema_init(&vc4->async_modeset, 1);

        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize vblank\n");
                return ret;
        }

        dev->mode_config.max_width = 2048;
        dev->mode_config.max_height = 2048;
        dev->mode_config.funcs = &vc4_mode_funcs;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.async_page_flip = true;

        drm_mode_config_reset(dev);

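        /* fbdev emulation is best effort: if it fails to initialize,
         * keep going without it.
         */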
        vc4->fbdev = drm_fbdev_cma_init(dev, 32,
                                        dev->mode_config.num_crtc,
                                        dev->mode_config.num_connector);
        if (IS_ERR(vc4->fbdev))
                vc4->fbdev = NULL;

        drm_kms_helper_poll_init(dev);

        return 0;
}