/*
 * Copyright (C) 2014 Free Electrons
 * Copyright (C) 2014 Atmel
 *
 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

#include "atmel_hlcdc_dc.h"
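
/*
 * Flip-work callback: drop the framebuffer reference held by a completed
 * flip request and free the request. drm_flip_work defers this out of
 * atomic context, where releasing the last framebuffer reference is safe.
 */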
static void
atmel_hlcdc_layer_fb_flip_release(struct drm_flip_work *work, void *val)
{
        struct atmel_hlcdc_layer_fb_flip *flip = val;

        if (flip->fb)
                drm_framebuffer_unreference(flip->fb);
        kfree(flip);
}
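
/*
 * Synchronous variant used on error paths: release the framebuffer
 * reference, the flip-work task and the request itself immediately
 * instead of going through the flip-work queue.
 */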
static void
atmel_hlcdc_layer_fb_flip_destroy(struct atmel_hlcdc_layer_fb_flip *flip)
{
        if (flip->fb)
                drm_framebuffer_unreference(flip->fb);
        kfree(flip->task);
        kfree(flip);
}
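
/*
 * Give the flip's DMA descriptors back to the pool and queue the flip on
 * the layer garbage collector, so that its framebuffer reference is
 * dropped from the flip-work worker, outside of atomic context.
 */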
static void
atmel_hlcdc_layer_fb_flip_release_queue(struct atmel_hlcdc_layer *layer,
                                        struct atmel_hlcdc_layer_fb_flip *flip)
{
        int i;

        if (!flip)
                return;

        for (i = 0; i < layer->max_planes; i++) {
                if (!flip->dscrs[i])
                        break;

                flip->dscrs[i]->status = 0;
                flip->dscrs[i] = NULL;
        }

        drm_flip_work_queue_task(&layer->gc, flip->task);
        drm_flip_work_commit(&layer->gc, layer->wq);
}
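
/*
 * Reset update slot @id: clear its dirty-config bitmap and config values,
 * and release the framebuffer flip attached to the slot, if any.
 */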
static void atmel_hlcdc_layer_update_reset(struct atmel_hlcdc_layer *layer,
                                           int id)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct atmel_hlcdc_layer_update_slot *slot;

        slot = &upd->slots[id];
        bitmap_clear(slot->updated_configs, 0, layer->desc->nconfigs);
        memset(slot->configs, 0,
               sizeof(*slot->configs) * layer->desc->nconfigs);

        if (slot->fb_flip) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, slot->fb_flip);
                slot->fb_flip = NULL;
        }
}
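
/*
 * Apply the pending update slot to the hardware: write every dirty config
 * register, then program the framebuffer DMA transfer. If the channel is
 * disabled the descriptors are written directly to the plane registers
 * and the channel is enabled; otherwise they are chained through the head
 * pointers and queued with an add-to-queue (A2Q) request. The accumulated
 * actions are finally triggered through the CHER register.
 * Must be called with layer->lock held.
 */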
static void atmel_hlcdc_layer_update_apply(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct regmap *regmap = layer->hlcdc->regmap;
        struct atmel_hlcdc_layer_update_slot *slot;
        struct atmel_hlcdc_layer_fb_flip *fb_flip;
        struct atmel_hlcdc_dma_channel_dscr *dscr;
        unsigned int cfg;
        u32 action = 0;
        int i = 0;

        if (upd->pending < 0 || upd->pending > 1)
                return;

        slot = &upd->slots[upd->pending];

        for_each_set_bit(cfg, slot->updated_configs, layer->desc->nconfigs) {
                regmap_write(regmap,
                             desc->regs_offset +
                             ATMEL_HLCDC_LAYER_CFG(layer, cfg),
                             slot->configs[cfg]);
                action |= ATMEL_HLCDC_LAYER_UPDATE;
        }

        fb_flip = slot->fb_flip;

        if (!fb_flip->fb)
                goto apply;

        if (dma->status == ATMEL_HLCDC_LAYER_DISABLED) {
                for (i = 0; i < fb_flip->ngems; i++) {
                        dscr = fb_flip->dscrs[i];
                        dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
                                     ATMEL_HLCDC_LAYER_DMA_IRQ |
                                     ATMEL_HLCDC_LAYER_ADD_IRQ |
                                     ATMEL_HLCDC_LAYER_DONE_IRQ;

                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
                                     dscr->addr);
                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
                                     dscr->ctrl);
                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
                                     dscr->next);
                }

                action |= ATMEL_HLCDC_LAYER_DMA_CHAN;
                dma->status = ATMEL_HLCDC_LAYER_ENABLED;
        } else {
                for (i = 0; i < fb_flip->ngems; i++) {
                        dscr = fb_flip->dscrs[i];
                        dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
                                     ATMEL_HLCDC_LAYER_DMA_IRQ |
                                     ATMEL_HLCDC_LAYER_DSCR_IRQ |
                                     ATMEL_HLCDC_LAYER_DONE_IRQ;

                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
                                     dscr->next);
                }

                action |= ATMEL_HLCDC_LAYER_A2Q;
        }

        /* Release unneeded descriptors */
        for (i = fb_flip->ngems; i < layer->max_planes; i++) {
                fb_flip->dscrs[i]->status = 0;
                fb_flip->dscrs[i] = NULL;
        }

        dma->queue = fb_flip;
        slot->fb_flip = NULL;

apply:
        if (action)
                regmap_write(regmap,
                             desc->regs_offset + ATMEL_HLCDC_LAYER_CHER,
                             action);

        atmel_hlcdc_layer_update_reset(layer, upd->pending);

        upd->pending = -1;
}
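
/*
 * Layer DMA channel interrupt handler. The channel status register packs
 * one byte of flags (DMA/DSCR/ADD/DONE/OVR) per memory plane: fold the
 * per-plane bits into a per-flip status, retire flips whose descriptors
 * have all been loaded or completed, reset the channel on overrun, and
 * apply the next pending update once the queue has drained.
 */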
void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        struct regmap *regmap = layer->hlcdc->regmap;
        struct atmel_hlcdc_layer_fb_flip *flip;
        unsigned long flags;
        unsigned int isr, imr;
        unsigned int status;
        unsigned int plane_status;
        u32 flip_status;
        int i;

        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IMR, &imr);
        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
        status = imr & isr;
        if (!status)
                return;

        spin_lock_irqsave(&layer->lock, flags);

        flip = dma->queue ? dma->queue : dma->cur;

        if (!flip) {
                spin_unlock_irqrestore(&layer->lock, flags);
                return;
        }

        /*
         * Set LOADED and DONE flags: they'll be cleared if at least one
         * memory plane is not LOADED or DONE.
         */
        flip_status = ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED |
                      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
        for (i = 0; i < flip->ngems; i++) {
                plane_status = (status >> (8 * i));

                if (plane_status &
                    (ATMEL_HLCDC_LAYER_ADD_IRQ |
                     ATMEL_HLCDC_LAYER_DSCR_IRQ) &
                    ~flip->dscrs[i]->ctrl) {
                        flip->dscrs[i]->status |=
                                ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;
                        flip->dscrs[i]->ctrl |=
                                ATMEL_HLCDC_LAYER_ADD_IRQ |
                                ATMEL_HLCDC_LAYER_DSCR_IRQ;
                }

                if (plane_status &
                    ATMEL_HLCDC_LAYER_DONE_IRQ &
                    ~flip->dscrs[i]->ctrl) {
                        flip->dscrs[i]->status |=
                                ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
                        flip->dscrs[i]->ctrl |=
                                ATMEL_HLCDC_LAYER_DONE_IRQ;
                }

                if (plane_status & ATMEL_HLCDC_LAYER_OVR_IRQ)
                        flip->dscrs[i]->status |=
                                ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;

                /*
                 * Clear LOADED and DONE flags if the memory plane is either
                 * not LOADED or not DONE.
                 */
                if (!(flip->dscrs[i]->status &
                      ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED))
                        flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;

                if (!(flip->dscrs[i]->status &
                      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE))
                        flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;

                /*
                 * An overrun on one memory plane impacts the whole
                 * framebuffer transfer, hence we set the OVERRUN flag as
                 * soon as one memory plane reports such an overrun.
                 */
                flip_status |= flip->dscrs[i]->status &
                               ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;
        }

        /* Get changed bits */
        flip_status ^= flip->status;
        flip->status |= flip_status;

        if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
                dma->cur = dma->queue;
                dma->queue = NULL;
        }

        if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
                dma->cur = NULL;
        }

        if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN) {
                regmap_write(regmap,
                             desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                             ATMEL_HLCDC_LAYER_RST);
                if (dma->queue)
                        atmel_hlcdc_layer_fb_flip_release_queue(layer,
                                                                dma->queue);
                if (dma->cur)
                        atmel_hlcdc_layer_fb_flip_release_queue(layer,
                                                                dma->cur);
                dma->cur = NULL;
                dma->queue = NULL;
        }

        if (!dma->queue) {
                atmel_hlcdc_layer_update_apply(layer);

                if (!dma->cur)
                        dma->status = ATMEL_HLCDC_LAYER_DISABLED;
        }

        spin_unlock_irqrestore(&layer->lock, flags);
}
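
/*
 * Disable the layer: reset the DMA channel, clear pending interrupts and
 * discard the current, queued and pending framebuffer updates so the IRQ
 * handler cannot restart the channel once it has been stopped.
 */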
int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct regmap *regmap = layer->hlcdc->regmap;
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        unsigned long flags;
        unsigned int isr;

        spin_lock_irqsave(&layer->lock, flags);

        /* Disable the layer */
        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                     ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
                     ATMEL_HLCDC_LAYER_UPDATE);

        /* Clear all pending interrupts */
        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);

        /* Discard current and queued framebuffer transfers. */
        if (dma->cur) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
                dma->cur = NULL;
        }
        if (dma->queue) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->queue);
                dma->queue = NULL;
        }

        /*
         * Then discard the pending update request (if any) to prevent the
         * DMA irq handler from restarting the DMA channel after it has
         * been disabled.
         */
        if (upd->pending >= 0) {
                atmel_hlcdc_layer_update_reset(layer, upd->pending);
                upd->pending = -1;
        }

        dma->status = ATMEL_HLCDC_LAYER_DISABLED;

        spin_unlock_irqrestore(&layer->lock, flags);

        return 0;
}
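
/*
 * Open an update transaction on the layer: allocate a flip request and
 * its flip-work task, select the free update slot, reserve one DMA
 * descriptor per plane from the pool, and seed the slot configs from the
 * pending slot when there is one, or from the hardware registers
 * otherwise. Returns -EBUSY when no descriptors are left.
 */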
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct regmap *regmap = layer->hlcdc->regmap;
        struct atmel_hlcdc_layer_fb_flip *fb_flip;
        struct atmel_hlcdc_layer_update_slot *slot;
        unsigned long flags;
        int i, j = 0;

        fb_flip = kzalloc(sizeof(*fb_flip), GFP_KERNEL);
        if (!fb_flip)
                return -ENOMEM;

        fb_flip->task = drm_flip_work_allocate_task(fb_flip, GFP_KERNEL);
        if (!fb_flip->task) {
                kfree(fb_flip);
                return -ENOMEM;
        }

        spin_lock_irqsave(&layer->lock, flags);

        upd->next = upd->pending ? 0 : 1;

        slot = &upd->slots[upd->next];

        for (i = 0; i < layer->max_planes * 4; i++) {
                if (!dma->dscrs[i].status) {
                        fb_flip->dscrs[j++] = &dma->dscrs[i];
                        dma->dscrs[i].status =
                                ATMEL_HLCDC_DMA_CHANNEL_DSCR_RESERVED;
                        if (j == layer->max_planes)
                                break;
                }
        }

        if (j < layer->max_planes) {
                for (i = 0; i < j; i++)
                        fb_flip->dscrs[i]->status = 0;

                spin_unlock_irqrestore(&layer->lock, flags);
                atmel_hlcdc_layer_fb_flip_destroy(fb_flip);
                return -EBUSY;
        }

        slot->fb_flip = fb_flip;

        if (upd->pending >= 0) {
                memcpy(slot->configs,
                       upd->slots[upd->pending].configs,
                       layer->desc->nconfigs * sizeof(u32));
                memcpy(slot->updated_configs,
                       upd->slots[upd->pending].updated_configs,
                       DIV_ROUND_UP(layer->desc->nconfigs,
                                    BITS_PER_BYTE * sizeof(unsigned long)) *
                       sizeof(unsigned long));
                if (upd->slots[upd->pending].fb_flip->fb) {
                        slot->fb_flip->fb =
                                upd->slots[upd->pending].fb_flip->fb;
                        slot->fb_flip->ngems =
                                upd->slots[upd->pending].fb_flip->ngems;
                        drm_framebuffer_reference(slot->fb_flip->fb);
                }
        } else {
                regmap_bulk_read(regmap,
                                 layer->desc->regs_offset +
                                 ATMEL_HLCDC_LAYER_CFG(layer, 0),
                                 upd->slots[upd->next].configs,
                                 layer->desc->nconfigs);
        }

        spin_unlock_irqrestore(&layer->lock, flags);

        return 0;
}
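
/*
 * Abort the transaction opened by atmel_hlcdc_layer_update_start() and
 * release everything attached to the next slot.
 */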
void atmel_hlcdc_layer_update_rollback(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;

        atmel_hlcdc_layer_update_reset(layer, upd->next);
        upd->next = -1;
}
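
/*
 * Attach @fb to the update slot being prepared: point one reserved DMA
 * descriptor at each memory plane (the GEM CMA physical address plus the
 * caller-provided offset) and swap the framebuffer references.
 */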
void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
                                     struct drm_framebuffer *fb,
                                     unsigned int *offsets)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct atmel_hlcdc_layer_fb_flip *fb_flip;
        struct atmel_hlcdc_layer_update_slot *slot;
        struct atmel_hlcdc_dma_channel_dscr *dscr;
        struct drm_framebuffer *old_fb;
        int nplanes = 0;
        int i;

        if (upd->next < 0 || upd->next > 1)
                return;

        if (fb)
                nplanes = drm_format_num_planes(fb->pixel_format);

        if (nplanes > layer->max_planes)
                return;

        slot = &upd->slots[upd->next];

        fb_flip = slot->fb_flip;
        old_fb = slot->fb_flip->fb;

        for (i = 0; i < nplanes; i++) {
                struct drm_gem_cma_object *gem;

                dscr = slot->fb_flip->dscrs[i];
                gem = drm_fb_cma_get_gem_obj(fb, i);
                dscr->addr = gem->paddr + offsets[i];
        }

        fb_flip->ngems = nplanes;
        fb_flip->fb = fb;

        if (fb)
                drm_framebuffer_reference(fb);

        if (old_fb)
                drm_framebuffer_unreference(old_fb);
}
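
/*
 * Update the bits selected by @mask in config register @cfg of the slot
 * being prepared, and mark the config as dirty so it is written back to
 * the hardware on commit.
 */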
void atmel_hlcdc_layer_update_cfg(struct atmel_hlcdc_layer *layer, int cfg,
                                  u32 mask, u32 val)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct atmel_hlcdc_layer_update_slot *slot;

        if (upd->next < 0 || upd->next > 1)
                return;

        if (cfg >= layer->desc->nconfigs)
                return;

        slot = &upd->slots[upd->next];
        slot->configs[cfg] &= ~mask;
        slot->configs[cfg] |= (val & mask);
        set_bit(cfg, slot->updated_configs);
}
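
/*
 * Commit the prepared update slot: it replaces any update still pending
 * and is applied immediately when the DMA queue is empty; otherwise the
 * IRQ handler will apply it once the queued transfer completes.
 */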
void atmel_hlcdc_layer_update_commit(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        unsigned long flags;

        if (upd->next < 0 || upd->next > 1)
                return;

        spin_lock_irqsave(&layer->lock, flags);

        /* Release the pending update request and replace it by the new one. */
        if (upd->pending >= 0)
                atmel_hlcdc_layer_update_reset(layer, upd->pending);

        upd->pending = upd->next;
        upd->next = -1;

        if (!dma->queue)
                atmel_hlcdc_layer_update_apply(layer);

        spin_unlock_irqrestore(&layer->lock, flags);
}
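
/*
 * Allocate the pool of DMA descriptors from coherent memory: four
 * descriptors per memory plane, each one pre-loaded with its own bus
 * address so that an unchained descriptor points back to itself.
 */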
static int atmel_hlcdc_layer_dma_init(struct drm_device *dev,
                                      struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        dma_addr_t dma_addr;
        int i;

        dma->dscrs = dma_alloc_coherent(dev->dev,
                                        layer->max_planes * 4 *
                                        sizeof(*dma->dscrs),
                                        &dma_addr, GFP_KERNEL);
        if (!dma->dscrs)
                return -ENOMEM;

        for (i = 0; i < layer->max_planes * 4; i++) {
                struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

                dscr->next = dma_addr + (i * sizeof(*dscr));
        }

        return 0;
}
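
/*
 * Release the DMA descriptor pool. The first descriptor's self-pointing
 * next field still holds the base bus address of the pool and is reused
 * as the dma_free_coherent() handle.
 */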
static void atmel_hlcdc_layer_dma_cleanup(struct drm_device *dev,
                                          struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        int i;

        for (i = 0; i < layer->max_planes * 4; i++) {
                struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

                dscr->status = 0;
        }

        dma_free_coherent(dev->dev, layer->max_planes * 4 *
                          sizeof(*dma->dscrs), dma->dscrs,
                          dma->dscrs[0].next);
}
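
/*
 * Allocate the two config update slots. A single buffer holds, for each
 * slot, the dirty-config bitmap followed by the config values; both slot
 * indices start out unused (-1).
 */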
static int atmel_hlcdc_layer_update_init(struct drm_device *dev,
                                struct atmel_hlcdc_layer *layer,
                                const struct atmel_hlcdc_layer_desc *desc)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        int updated_size;
        void *buffer;
        int i;

        updated_size = DIV_ROUND_UP(desc->nconfigs,
                                    BITS_PER_BYTE *
                                    sizeof(unsigned long));

        buffer = devm_kzalloc(dev->dev,
                              ((desc->nconfigs * sizeof(u32)) +
                               (updated_size * sizeof(unsigned long))) * 2,
                              GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        for (i = 0; i < 2; i++) {
                upd->slots[i].updated_configs = buffer;
                buffer += updated_size * sizeof(unsigned long);
                upd->slots[i].configs = buffer;
                buffer += desc->nconfigs * sizeof(u32);
        }

        upd->pending = -1;
        upd->next = -1;

        return 0;
}
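
/*
 * One-time layer setup: reset the channel, derive max_planes from the
 * supported pixel formats, initialize the lock, flip-work and DMA
 * descriptor pool, allocate the update slots, then flush the status
 * register and enable the per-plane interrupts.
 */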
int atmel_hlcdc_layer_init(struct drm_device *dev,
                           struct atmel_hlcdc_layer *layer,
                           const struct atmel_hlcdc_layer_desc *desc)
{
        struct atmel_hlcdc_dc *dc = dev->dev_private;
        struct regmap *regmap = dc->hlcdc->regmap;
        unsigned int tmp;
        int ret;
        int i;

        layer->hlcdc = dc->hlcdc;
        layer->wq = dc->wq;
        layer->desc = desc;

        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                     ATMEL_HLCDC_LAYER_RST);
        for (i = 0; i < desc->formats->nformats; i++) {
                int nplanes = drm_format_num_planes(desc->formats->formats[i]);

                if (nplanes > layer->max_planes)
                        layer->max_planes = nplanes;
        }

        spin_lock_init(&layer->lock);
        drm_flip_work_init(&layer->gc, desc->name,
                           atmel_hlcdc_layer_fb_flip_release);
        ret = atmel_hlcdc_layer_dma_init(dev, layer);
        if (ret)
                return ret;

        ret = atmel_hlcdc_layer_update_init(dev, layer, desc);
        if (ret)
                return ret;

        /* Flush Status Register */
        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
                     0xffffffff);
        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR,
                    &tmp);

        tmp = 0;
        for (i = 0; i < layer->max_planes; i++)
                tmp |= (ATMEL_HLCDC_LAYER_DMA_IRQ |
                        ATMEL_HLCDC_LAYER_DSCR_IRQ |
                        ATMEL_HLCDC_LAYER_ADD_IRQ |
                        ATMEL_HLCDC_LAYER_DONE_IRQ |
                        ATMEL_HLCDC_LAYER_OVR_IRQ) << (8 * i);

        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IER, tmp);

        return 0;
}
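
/*
 * Undo atmel_hlcdc_layer_init(): mask all layer interrupts, reset the
 * channel, then release the DMA descriptors and the flip-work queue.
 */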
void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
                               struct atmel_hlcdc_layer *layer)
{
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        struct regmap *regmap = layer->hlcdc->regmap;

        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
                     0xffffffff);
        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                     ATMEL_HLCDC_LAYER_RST);

        atmel_hlcdc_layer_dma_cleanup(dev, layer);
        drm_flip_work_cleanup(&layer->gc);
}