]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/mxc/vpu/mxc_vpu.c
mxc: vpu: remove iramsize property from DT and set it in the driver based on 'compati...
[karo-tx-linux.git] / drivers / mxc / vpu / mxc_vpu.c
1 /*
2  * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
3  */
4
5 /*
6  * The code contained herein is licensed under the GNU General Public
7  * License. You may obtain a copy of the GNU General Public License
8  * Version 2 or later at the following locations:
9  *
10  * http://www.opensource.org/licenses/gpl-license.html
11  * http://www.gnu.org/copyleft/gpl.html
12  */
13
14 /*!
15  * @file mxc_vpu.c
16  *
17  * @brief VPU system initialization and file operation implementation
18  *
19  * @ingroup VPU
20  */
21
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/stat.h>
27 #include <linux/platform_device.h>
28 #include <linux/kdev_t.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/wait.h>
31 #include <linux/list.h>
32 #include <linux/clk.h>
33 #include <linux/delay.h>
34 #include <linux/fsl_devices.h>
35 #include <linux/uaccess.h>
36 #include <linux/io.h>
37 #include <linux/slab.h>
38 #include <linux/workqueue.h>
39 #include <linux/sched.h>
40 #include <linux/vmalloc.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/page-flags.h>
43 #include <linux/mm_types.h>
44 #include <linux/types.h>
45 #include <linux/memblock.h>
46 #include <linux/memory.h>
47 #include <linux/version.h>
48 #include <linux/module.h>
49 #include <linux/pm_runtime.h>
50 #include <linux/sizes.h>
51 #include <linux/genalloc.h>
52 #include <linux/of.h>
53 #include <linux/of_device.h>
54 #include <linux/reset.h>
55 #include <linux/clk.h>
56 #include <linux/mxc_vpu.h>
57
58 /* Define one new pgprot which combined uncached and XN(never executable) */
59 #define pgprot_noncachedxn(prot) \
60         __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
61
/* Per-device driver state, shared by every open file handle. */
struct vpu_priv {
        struct fasync_struct *async_queue;      /* SIGIO subscribers (vpu_fasync) */
        struct work_struct work;                /* bottom half for the VPU/JPU IRQs */
        struct workqueue_struct *workqueue;     /* queue that runs 'work' */
        struct mutex lock;                      /* serializes open/ioctl/release state */
        const struct mxc_vpu_soc_data *soc_data; /* per-SoC quirks, from OF match data */
        int clk_enabled;                        /* VPU clock reference count */
};
70
/* Per-open-file state: tracks the clock references taken by this handle. */
struct vpu_user_data {
        struct vpu_priv *vpu_data;      /* back-pointer to the driver state */
        int clk_enable_cnt;             /* refs taken via VPU_IOC_CLKGATE_SETTING */
};
75
76 /* To track the allocated memory buffer */
77 struct memalloc_record {
78         struct list_head list;
79         struct vpu_mem_desc mem;
80 };
81
82 struct iram_setting {
83         u32 start;
84         u32 end;
85 };
86
/* Per-SoC feature flags and parameters, selected through the OF match table. */
struct mxc_vpu_soc_data {
        unsigned vpu_pwr_mgmnt:1,       /* SoC supports VPU power management */
                regulator_required:1,   /* the "pu" regulator must be present */
                quirk_subblk_en:1,      /* NOTE(review): not referenced in this chunk — verify use elsewhere */
                is_mx51:1,              /* i.MX51: cannot force-idle a busy VPU on release */
                is_mx53:1,              /* i.MX53: same restriction as i.MX51 */
                is_mx6dl:1,             /* i.MX6DL: supports the 0x10F0 gating handshake */
                is_mx6q:1,              /* i.MX6Q: supports the 0x10F0 gating handshake */
                has_jpu:1;              /* JPEG unit present, with its own IRQ */
        size_t iramsize;                /* bytes of on-chip IRAM to reserve (0 = none) */
};
98
99 static struct gen_pool *iram_pool;
100 static u32 iram_base;
101
102 static LIST_HEAD(mem_list);
103
104 static int vpu_major;
105 static struct class *vpu_class;
106 static struct vpu_priv *vpu_data;
107 static u8 open_count;
108 static struct clk *vpu_clk;
109 static struct vpu_mem_desc bitwork_mem;
110 static struct vpu_mem_desc pic_para_mem;
111 static struct vpu_mem_desc user_data_mem;
112 static struct vpu_mem_desc share_mem;
113 static struct vpu_mem_desc vshare_mem;
114
115 static void __iomem *vpu_base;
116 static int vpu_ipi_irq;
117 static u32 phy_vpu_base_addr;
118
119 static struct device *vpu_dev;
120
121 /* IRAM setting */
122 static struct iram_setting iram;
123
124 /* implement the blocking ioctl */
125 static int irq_status;
126 static int codec_done;
127 static wait_queue_head_t vpu_queue;
128
129 static int vpu_jpu_irq;
130
131 #ifdef CONFIG_PM_SLEEP
132 static unsigned int regBk[64];
133 static unsigned int pc_before_suspend;
134 #endif
135 static struct regulator *vpu_regulator;
136
137 #define READ_REG(x)             readl_relaxed(vpu_base + (x))
138 #define WRITE_REG(val, x)       writel_relaxed(val, vpu_base + (x))
139
140 static int vpu_clk_enable(struct vpu_priv *vpu_data)
141 {
142         if (WARN_ON(vpu_data->clk_enabled < 0))
143                 return -EINVAL;
144
145         if (vpu_data->clk_enabled++ == 0)
146                 return clk_prepare_enable(vpu_clk);
147
148         return 0;
149 }
150
151 static int vpu_clk_disable(struct vpu_priv *vpu_data)
152 {
153         if (WARN_ON(vpu_data->clk_enabled <= 0))
154                 return -EINVAL;
155
156         if (--vpu_data->clk_enabled == 0)
157                 clk_disable_unprepare(vpu_clk);
158         return 0;
159 }
160
/* Assert the VPU reset line through the reset-controller framework. */
static inline int vpu_reset(void)
{
        return device_reset(vpu_dev);
}
165
166 static void vpu_power_up(void)
167 {
168         int ret;
169
170         if (IS_ERR(vpu_regulator))
171                 return;
172
173         ret = regulator_enable(vpu_regulator);
174         if (ret)
175                 dev_err(vpu_dev, "failed to power up vpu: %d\n", ret);
176 }
177
178 static void vpu_power_down(void)
179 {
180         int ret;
181
182         if (IS_ERR(vpu_regulator))
183                 return;
184
185         ret = regulator_disable(vpu_regulator);
186         if (ret)
187                 dev_err(vpu_dev, "failed to power down vpu: %d\n", ret);
188 }
189
190 /*!
191  * Private function to alloc dma buffer
192  * @return status  0 success.
193  */
194 static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
195 {
196         mem->cpu_addr = dma_alloc_coherent(vpu_dev, PAGE_ALIGN(mem->size),
197                                         &mem->phy_addr,
198                                         GFP_DMA | GFP_KERNEL);
199         dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = %p\n", mem->cpu_addr);
200         if (mem->cpu_addr == NULL) {
201                 dev_err(vpu_dev, "Physical memory allocation error!\n");
202                 return -ENOMEM;
203         }
204         return 0;
205 }
206
207 /*!
208  * Private function to free dma buffer
209  */
210 static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
211 {
212         if (mem->cpu_addr != NULL)
213                 dma_free_coherent(vpu_dev, PAGE_ALIGN(mem->size),
214                                 mem->cpu_addr, mem->phy_addr);
215 }
216
217 /*!
218  * Private function to free buffers
219  * @return status  0 success.
220  */
221 static int vpu_free_buffers(void)
222 {
223         struct memalloc_record *rec, *n;
224         struct vpu_mem_desc mem;
225
226         list_for_each_entry_safe(rec, n, &mem_list, list) {
227                 mem = rec->mem;
228                 if (mem.cpu_addr != 0) {
229                         vpu_free_dma_buffer(&mem);
230                         dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
231                         /* delete from list */
232                         list_del(&rec->list);
233                         kfree(rec);
234                 }
235         }
236
237         return 0;
238 }
239
240 static inline void vpu_worker_callback(struct work_struct *w)
241 {
242         struct vpu_priv *dev = container_of(w, struct vpu_priv, work);
243
244         if (dev->async_queue)
245                 kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
246
247         irq_status = 1;
248         /*
249          * Clock is gated on when dec/enc started, gate it off when
250          * codec is done.
251          */
252         if (codec_done)
253                 codec_done = 0;
254
255         wake_up_interruptible(&vpu_queue);
256 }
257
/*!
 * @brief vpu interrupt handler
 *
 * Reads the interrupt reason, records codec completion (bit 3 sets
 * codec_done), acks the interrupt and defers the wake-up to the
 * driver workqueue.
 */
static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
{
        struct vpu_priv *dev = dev_id;
        unsigned long reg;

        reg = READ_REG(BIT_INT_REASON);
        if (reg & 0x8)
                codec_done = 1;
        /* Ack so the VPU can raise the next interrupt. */
        WRITE_REG(0x1, BIT_INT_CLEAR);

        /* Notification of user space happens in process context. */
        queue_work(dev->workqueue, &dev->work);

        return IRQ_HANDLED;
}
275
/*!
 * @brief vpu jpu interrupt handler
 *
 * Checks the JPEG status register (bits 0-1 set codec_done) and defers
 * the wake-up to the driver workqueue.  Note: unlike the IPI handler,
 * no explicit ack write is done here — presumably the status register
 * is cleared elsewhere; verify against the JPU programming model.
 */
static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
{
        struct vpu_priv *dev = dev_id;
        unsigned long reg;

        reg = READ_REG(MJPEG_PIC_STATUS_REG);
        if (reg & 0x3)
                codec_done = 1;

        queue_work(dev->workqueue, &dev->work);

        return IRQ_HANDLED;
}
292
293 /*!
294  * @brief open function for vpu file operation
295  *
296  * @return  0 on success or negative error code on error
297  */
298 static int vpu_open(struct inode *inode, struct file *filp)
299 {
300         struct vpu_user_data *user_data = devm_kzalloc(vpu_dev,
301                                                 sizeof(*user_data),
302                                                 GFP_KERNEL);
303         if (user_data == NULL)
304                 return -ENOMEM;
305
306         user_data->vpu_data = vpu_data;
307
308         mutex_lock(&vpu_data->lock);
309
310         if (open_count++ == 0) {
311                 pm_runtime_get_sync(vpu_dev);
312                 vpu_power_up();
313         }
314
315         filp->private_data = user_data;
316         mutex_unlock(&vpu_data->lock);
317         return 0;
318 }
319
320 /*!
321  * @brief IO ctrl function for vpu file operation
322  * @param cmd IO ctrl command
323  * @return  0 on success or negative error code on error
324  */
325 static long vpu_ioctl(struct file *filp, u_int cmd,
326                      u_long arg)
327 {
328         int ret;
329         struct vpu_user_data *user_data = filp->private_data;
330         struct vpu_priv *vpu_data = user_data->vpu_data;
331
332         switch (cmd) {
333         case VPU_IOC_PHYMEM_ALLOC:
334         {
335                 struct memalloc_record *rec;
336
337                 rec = kzalloc(sizeof(*rec), GFP_KERNEL);
338                 if (!rec)
339                         return -ENOMEM;
340
341                 if (copy_from_user(&rec->mem,
342                                         (struct vpu_mem_desc *)arg,
343                                         sizeof(struct vpu_mem_desc))) {
344                         kfree(rec);
345                         return -EFAULT;
346                 }
347
348                 dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
349                         rec->mem.size);
350
351                 ret = vpu_alloc_dma_buffer(&rec->mem);
352                 if (ret) {
353                         kfree(rec);
354                         return ret;
355                 }
356                 if (copy_to_user((void __user *)arg, &rec->mem,
357                                         sizeof(struct vpu_mem_desc))) {
358                         kfree(rec);
359                         return -EFAULT;
360                 }
361
362                 mutex_lock(&vpu_data->lock);
363                 list_add(&rec->list, &mem_list);
364                 mutex_unlock(&vpu_data->lock);
365
366                 break;
367         }
368         case VPU_IOC_PHYMEM_FREE:
369         {
370                 struct memalloc_record *rec, *n;
371                 struct vpu_mem_desc vpu_mem;
372
373                 if (copy_from_user(&vpu_mem,
374                                         (struct vpu_mem_desc *)arg,
375                                         sizeof(struct vpu_mem_desc)))
376                         return -EFAULT;
377
378                 dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = %p\n",
379                         vpu_mem.cpu_addr);
380                 if (vpu_mem.cpu_addr != NULL)
381                         vpu_free_dma_buffer(&vpu_mem);
382
383                 mutex_lock(&vpu_data->lock);
384                 list_for_each_entry_safe(rec, n, &mem_list, list) {
385                         if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
386                                 list_del(&rec->list);
387                                 break;
388                         }
389                 }
390                 kfree(rec);
391                 mutex_unlock(&vpu_data->lock);
392
393                 ret = 0;
394                 break;
395         }
396         case VPU_IOC_WAIT4INT:
397         {
398                 u_long timeout = arg;
399
400                 ret = wait_event_interruptible_timeout(vpu_queue,
401                                                 irq_status != 0,
402                                                 msecs_to_jiffies(timeout));
403                 if (ret == 0) {
404                         dev_warn(vpu_dev, "VPU blocking: timeout.\n");
405                         ret = -ETIMEDOUT;
406                 } else if (signal_pending(current)) {
407                         dev_warn(vpu_dev, "VPU interrupt received.\n");
408                         ret = -ERESTARTSYS;
409                 } else {
410                         ret = irq_status = 0;
411                 }
412                 break;
413         }
414         case VPU_IOC_IRAM_SETTING:
415                 ret = copy_to_user((void __user *)arg, &iram,
416                                 sizeof(struct iram_setting));
417                 if (ret)
418                         ret = -EFAULT;
419
420                 break;
421         case VPU_IOC_CLKGATE_SETTING:
422         {
423                 u32 clkgate_en;
424
425                 if (get_user(clkgate_en, (u32 __user *)arg))
426                         return -EFAULT;
427
428                 mutex_lock(&vpu_data->lock);
429                 if (clkgate_en) {
430                         ret = vpu_clk_enable(vpu_data);
431                         if (ret == 0)
432                                 user_data->clk_enable_cnt++;
433                 } else {
434                         if (user_data->clk_enable_cnt == 0) {
435                                 ret = -EINVAL;
436                         } else {
437                                 if (--user_data->clk_enable_cnt == 0)
438                                         vpu_clk_disable(vpu_data);
439                                 ret = 0;
440                         }
441                 }
442                 mutex_unlock(&vpu_data->lock);
443                 break;
444         }
445         case VPU_IOC_GET_SHARE_MEM:
446                 mutex_lock(&vpu_data->lock);
447                 if (share_mem.cpu_addr == NULL) {
448                         if (copy_from_user(&share_mem,
449                                                 (struct vpu_mem_desc *)arg,
450                                                 sizeof(struct vpu_mem_desc))) {
451                                 mutex_unlock(&vpu_data->lock);
452                                 return -EFAULT;
453                         }
454                         ret = vpu_alloc_dma_buffer(&share_mem);
455                         if (ret) {
456                                 mutex_unlock(&vpu_data->lock);
457                                 return ret;
458                         }
459                 }
460                 if (copy_to_user((void __user *)arg,
461                                         &share_mem,
462                                         sizeof(struct vpu_mem_desc)))
463                         ret = -EFAULT;
464                 else
465                         ret = 0;
466                 mutex_unlock(&vpu_data->lock);
467                 break;
468         case VPU_IOC_REQ_VSHARE_MEM:
469                 mutex_lock(&vpu_data->lock);
470                 if (vshare_mem.cpu_addr == NULL) {
471                         if (copy_from_user(&vshare_mem,
472                                                 (struct vpu_mem_desc *)arg,
473                                                 sizeof(struct
474                                                         vpu_mem_desc))) {
475                                 mutex_unlock(&vpu_data->lock);
476                                 return -EFAULT;
477                         }
478                         vshare_mem.cpu_addr = vmalloc_user(vshare_mem.size);
479                         if (vshare_mem.cpu_addr == NULL) {
480                                 mutex_unlock(&vpu_data->lock);
481                                 return -ENOMEM;
482                         }
483                 }
484                 if (copy_to_user((void __user *)arg, &vshare_mem,
485                                         sizeof(struct vpu_mem_desc)))
486                         ret = -EFAULT;
487                 else
488                         ret = 0;
489                 mutex_unlock(&vpu_data->lock);
490                 break;
491         case VPU_IOC_GET_WORK_ADDR:
492                 if (bitwork_mem.cpu_addr == 0) {
493                         if (copy_from_user(&bitwork_mem,
494                                                 (struct vpu_mem_desc *)arg,
495                                                 sizeof(struct vpu_mem_desc)))
496                                 return -EFAULT;
497
498                         ret = vpu_alloc_dma_buffer(&bitwork_mem);
499                         if (ret)
500                                 return ret;
501                 }
502                 if (copy_to_user((void __user *)arg,
503                                         &bitwork_mem,
504                                         sizeof(struct
505                                                 vpu_mem_desc)))
506                         ret = -EFAULT;
507                 else
508                         ret = 0;
509                 break;
510         /*
511          * The following two ioctls are used when user allocates a working buffer
512          * and registers it to vpu driver.
513          */
514         case VPU_IOC_QUERY_BITWORK_MEM:
515                 if (copy_to_user((void __user *)arg,
516                                         &bitwork_mem,
517                                         sizeof(struct vpu_mem_desc)))
518                         ret = -EFAULT;
519                 else
520                         ret = 0;
521                 break;
522         case VPU_IOC_SET_BITWORK_MEM:
523                 if (copy_from_user(&bitwork_mem,
524                                         (struct vpu_mem_desc *)arg,
525                                         sizeof(struct vpu_mem_desc)))
526                         ret = -EFAULT;
527                 else
528                         ret = 0;
529                 break;
530         case VPU_IOC_SYS_SW_RESET:
531                 ret = vpu_reset();
532                 break;
533         case VPU_IOC_REG_DUMP:
534         case VPU_IOC_PHYMEM_DUMP:
535                 ret = -ENOTSUPP;
536                 break;
537         case VPU_IOC_PHYMEM_CHECK:
538         {
539                 struct vpu_mem_desc check_memory;
540
541                 if (copy_from_user(&check_memory, (void __user *)arg,
542                                         sizeof(struct vpu_mem_desc)))
543                         return -EFAULT;
544
545                 check_memory.size = 1;
546                 if (copy_to_user((void __user *)arg, &check_memory,
547                                         sizeof(struct vpu_mem_desc)))
548                         ret = -EFAULT;
549                 else
550                         ret = 0;
551                 break;
552         }
553         case VPU_IOC_LOCK_DEV:
554         {
555                 u32 lock_en;
556
557                 if (get_user(lock_en, (u32 __user *)arg))
558                         return -EFAULT;
559
560                 if (lock_en)
561                         mutex_lock(&vpu_data->lock);
562                 else
563                         mutex_unlock(&vpu_data->lock);
564                 ret = 0;
565                 break;
566         }
567         default:
568                 dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
569                 ret = -EINVAL;
570         }
571         return ret;
572 }
573
574 /*!
575  * @brief Release function for vpu file operation
576  * @return  0 on success or negative error code on error
577  */
578 static int vpu_release(struct inode *inode, struct file *filp)
579 {
580         unsigned long timeout;
581         struct vpu_user_data *user_data = filp->private_data;
582         struct vpu_priv *vpu_data = user_data->vpu_data;
583
584         mutex_lock(&vpu_data->lock);
585
586         if (open_count > 0 && !--open_count) {
587                 /* Wait for vpu go to idle state */
588                 vpu_clk_enable(vpu_data);
589                 if (READ_REG(BIT_CUR_PC)) {
590
591                         timeout = jiffies + HZ;
592                         while (READ_REG(BIT_BUSY_FLAG)) {
593                                 msleep(1);
594                                 if (time_after(jiffies, timeout)) {
595                                         dev_warn(vpu_dev, "VPU timeout during release\n");
596                                         break;
597                                 }
598                         }
599
600                         /* Clean up interrupt */
601                         cancel_work_sync(&vpu_data->work);
602                         flush_workqueue(vpu_data->workqueue);
603                         irq_status = 0;
604
605                         if (READ_REG(BIT_BUSY_FLAG)) {
606                                 if (vpu_data->soc_data->is_mx51 ||
607                                         vpu_data->soc_data->is_mx53) {
608                                         dev_err(vpu_dev,
609                                                 "fatal error: can't gate/power off when VPU is busy\n");
610                                         vpu_clk_disable(vpu_data);
611                                         mutex_unlock(&vpu_data->lock);
612                                         return -EBUSY;
613                                 }
614                                 if (vpu_data->soc_data->is_mx6dl ||
615                                         vpu_data->soc_data->is_mx6q) {
616                                         WRITE_REG(0x11, 0x10F0);
617                                         timeout = jiffies + HZ;
618                                         while (READ_REG(0x10F4) != 0x77) {
619                                                 msleep(1);
620                                                 if (time_after(jiffies, timeout))
621                                                         break;
622                                         }
623
624                                         if (READ_REG(0x10F4) != 0x77) {
625                                                 dev_err(vpu_dev,
626                                                         "fatal error: can't gate/power off when VPU is busy\n");
627                                                 WRITE_REG(0x0, 0x10F0);
628                                                 vpu_clk_disable(vpu_data);
629                                                 mutex_unlock(&vpu_data->lock);
630                                                 return -EBUSY;
631                                         }
632                                         vpu_reset();
633                                 }
634                         }
635                 }
636
637                 vpu_free_buffers();
638
639                 /* Free shared memory when vpu device is idle */
640                 vpu_free_dma_buffer(&share_mem);
641                 share_mem.cpu_addr = 0;
642                 vfree(vshare_mem.cpu_addr);
643                 vshare_mem.cpu_addr = 0;
644
645                 if (user_data->clk_enable_cnt)
646                         vpu_clk_disable(vpu_data);
647
648                 vpu_clk_disable(vpu_data);
649                 vpu_power_down();
650                 pm_runtime_put_sync_suspend(vpu_dev);
651                 devm_kfree(vpu_dev, user_data);
652         }
653         mutex_unlock(&vpu_data->lock);
654
655         return 0;
656 }
657
658 /*!
659  * @brief fasync function for vpu file operation
660  * @return  0 on success or negative error code on error
661  */
662 static int vpu_fasync(int fd, struct file *filp, int mode)
663 {
664         struct vpu_user_data *user_data = filp->private_data;
665         struct vpu_priv *vpu_data = user_data->vpu_data;
666         return fasync_helper(fd, filp, mode, &vpu_data->async_queue);
667 }
668
/*!
 * @brief memory map function of harware registers for vpu file operation
 * @param fp  file pointer (unused)
 * @param vm  VMA describing the requested mapping
 * @return  0 on success or -EAGAIN if remap_pfn_range() fails
 */
static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
{
        unsigned long pfn;

        vm->vm_flags |= VM_IO;
        /*
         * Since vpu registers have been mapped with ioremap() at probe
         * which L_PTE_XN is 1, and the same physical address must be
         * mapped multiple times with same type, so set L_PTE_XN to 1 here.
         * Otherwise, there may be unexpected result in video codec.
         */
        vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
        /* Map starting at the VPU register base captured at probe time. */
        pfn = phy_vpu_base_addr >> PAGE_SHIFT;
        dev_dbg(vpu_dev, "size=0x%08lx, page no.=0x%08lx\n",
                 vm->vm_end - vm->vm_start, pfn);
        return remap_pfn_range(vm, vm->vm_start, pfn,
                        vm->vm_end - vm->vm_start,
                        vm->vm_page_prot) ? -EAGAIN : 0;
}
692
693 /*!
694  * @brief memory map function of memory for vpu file operation
695  * @return  0 on success or negative error code on error
696  */
697 static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
698 {
699         size_t request_size = vm->vm_end - vm->vm_start;
700
701         dev_dbg(vpu_dev, "start=0x%08lx, pgoff=0x%08lx, size=%zx\n",
702                 vm->vm_start, vm->vm_pgoff, request_size);
703
704         vm->vm_flags |= VM_IO;
705         vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
706
707         return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
708                                request_size, vm->vm_page_prot) ? -EAGAIN : 0;
709 }
710
711 /* !
712  * @brief memory map function of vmalloced share memory
713  * @return  0 on success or negative error code on error
714  */
715 static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
716 {
717         int ret;
718
719         ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
720         vm->vm_flags |= VM_IO;
721         return ret;
722 }
723 /*!
724  * @brief memory map interface for vpu file operation
725  * @return  0 on success or negative error code on error
726  */
727 static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
728 {
729         unsigned long offset;
730
731         offset = (unsigned long)vshare_mem.cpu_addr >> PAGE_SHIFT;
732
733         if (vm->vm_pgoff && (vm->vm_pgoff == offset))
734                 return vpu_map_vshare_mem(fp, vm);
735         else if (vm->vm_pgoff)
736                 return vpu_map_dma_mem(fp, vm);
737         else
738                 return vpu_map_hwregs(fp, vm);
739 }
740
/* Character-device entry points for the mxc_vpu device node. */
static const struct file_operations vpu_fops = {
        .owner = THIS_MODULE,
        .open = vpu_open,
        .unlocked_ioctl = vpu_ioctl,
        .release = vpu_release,
        .fasync = vpu_fasync,
        .mmap = vpu_mmap,
};
749
/* i.MX6DL: power-managed VPU with JPEG unit, no IRAM carve-out. */
static const struct mxc_vpu_soc_data imx6dl_vpu_data = {
        .regulator_required = 1,
        .vpu_pwr_mgmnt = 1,
        .has_jpu = 1,
};

/* i.MX6Q: as 6DL plus the subblk quirk and a 0x21000-byte IRAM reservation. */
static const struct mxc_vpu_soc_data imx6q_vpu_data = {
        .quirk_subblk_en = 1,
        .regulator_required = 1,
        .vpu_pwr_mgmnt = 1,
        .has_jpu = 1,
        .iramsize = 0x21000,
};

/* i.MX53: no optional features. */
static const struct mxc_vpu_soc_data imx53_vpu_data = {
};

/* i.MX51: power management only. */
static const struct mxc_vpu_soc_data imx51_vpu_data = {
        .vpu_pwr_mgmnt = 1,
};

/* Device-tree match table; .data selects the per-SoC feature set. */
static const struct of_device_id vpu_of_match[] = {
        { .compatible = "fsl,imx6dl-vpu", .data = &imx6dl_vpu_data, },
        { .compatible = "fsl,imx6q-vpu", .data = &imx6q_vpu_data, },
        { .compatible = "fsl,imx53-vpu", .data = &imx53_vpu_data, },
        { .compatible = "fsl,imx51-vpu", .data = &imx51_vpu_data, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, vpu_of_match);
779
780 /*!
781  * This function is called by the driver framework to initialize the vpu device.
782  * @param   dev The device structure for the vpu passed in by the framework.
783  * @return   0 on success or negative error code on error
784  */
785 static int vpu_dev_probe(struct platform_device *pdev)
786 {
787         int err = 0;
788         struct device *temp_class;
789         struct resource *res;
790         struct device_node *np = pdev->dev.of_node;
791         struct vpu_priv *drv_data;
792         const struct of_device_id *of_id = of_match_device(vpu_of_match,
793                                                         &pdev->dev);
794         const struct mxc_vpu_soc_data *soc_data = of_id->data;
795
796         drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
797         if (drv_data == NULL)
798                 return -ENOMEM;
799
800         drv_data->soc_data = soc_data;
801         mutex_init(&drv_data->lock);
802
803         init_waitqueue_head(&vpu_queue);
804         drv_data->workqueue = create_workqueue("vpu_wq");
805         INIT_WORK(&drv_data->work, vpu_worker_callback);
806
807         if (soc_data->iramsize) {
808                 iram_pool = of_get_named_gen_pool(np, "iram", 0);
809                 if (!iram_pool) {
810                         dev_err(&pdev->dev, "iram pool not available\n");
811                         return -ENOMEM;
812                 }
813
814                 iram_base = gen_pool_alloc(iram_pool, soc_data->iramsize);
815                 if (!iram_base) {
816                         dev_err(&pdev->dev, "unable to alloc iram\n");
817                         return -ENOMEM;
818                 }
819
820                 iram.start = gen_pool_virt_to_phys(iram_pool, iram_base);
821                 iram.end = iram.start + soc_data->iramsize - 1;
822         }
823
824         vpu_dev = &pdev->dev;
825
826         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
827         vpu_base = devm_ioremap_resource(&pdev->dev, res);
828         if (IS_ERR(vpu_base))
829                 return PTR_ERR(vpu_base);
830         phy_vpu_base_addr = res->start;
831
832         vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
833         if (vpu_major < 0) {
834                 dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
835                 return vpu_major;
836         }
837
838         vpu_class = class_create(THIS_MODULE, "mxc_vpu");
839         if (IS_ERR(vpu_class)) {
840                 err = PTR_ERR(vpu_class);
841                 goto err_out_chrdev;
842         }
843
844         temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
845                                    NULL, "mxc_vpu");
846         if (IS_ERR(temp_class)) {
847                 err = PTR_ERR(temp_class);
848                 goto err_out_class;
849         }
850
851         vpu_clk = clk_get(&pdev->dev, "vpu_clk");
852         if (IS_ERR(vpu_clk)) {
853                 err = PTR_ERR(vpu_clk);
854                 goto err_out_class;
855         }
856
857         vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
858         if (vpu_ipi_irq < 0) {
859                 dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
860                 err = vpu_ipi_irq;
861                 goto err_out_class;
862         }
863         err = devm_request_irq(vpu_dev, vpu_ipi_irq, vpu_ipi_irq_handler, 0,
864                         "VPU_CODEC_IRQ", drv_data);
865         if (err)
866                 goto err_out_class;
867
868         vpu_regulator = devm_regulator_get(vpu_dev, "pu");
869         if (IS_ERR(vpu_regulator)) {
870                 if (drv_data->soc_data->regulator_required) {
871                         dev_err(vpu_dev, "failed to get vpu power\n");
872                         goto err_out_class;
873                 } else {
874                         /* regulator_get() will return an error on MX5x;
875                          * just ignore it everywhere
876                          */
877                         dev_warn(vpu_dev, "failed to get vpu power\n");
878                 }
879         }
880
881         platform_set_drvdata(pdev, drv_data);
882
883         if (drv_data->soc_data->has_jpu) {
884                 vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
885                 if (vpu_jpu_irq < 0) {
886                         dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
887                         err = vpu_jpu_irq;
888                         goto err_out_class;
889                 }
890                 err = devm_request_irq(vpu_dev, vpu_jpu_irq,
891                                 vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
892                                 "VPU_JPG_IRQ", drv_data);
893                 if (err)
894                         goto err_out_class;
895         }
896
897         pm_runtime_enable(&pdev->dev);
898         vpu_data = drv_data;
899
900         dev_info(vpu_dev, "VPU initialized\n");
901         return 0;
902
903 err_out_class:
904         device_destroy(vpu_class, MKDEV(vpu_major, 0));
905         class_destroy(vpu_class);
906 err_out_chrdev:
907         unregister_chrdev(vpu_major, "mxc_vpu");
908         return err;
909 }
910
/*
 * vpu_dev_remove - platform driver remove callback
 * @pdev: the VPU platform device being removed
 *
 * Tears down everything vpu_dev_probe() set up, in reverse dependency
 * order, then puts the VPU hardware through a final reset so it is left
 * in a known state.  Always returns 0.
 */
static int vpu_dev_remove(struct platform_device *pdev)
{
	struct vpu_priv *vpu_data = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);

	/* Stop any in-flight bottom-half work before destroying the queue */
	cancel_work_sync(&vpu_data->work);
	flush_workqueue(vpu_data->workqueue);
	destroy_workqueue(vpu_data->workqueue);

	/* iram.start is only non-zero when probe carved a region out of
	 * the SoC on-chip RAM gen_pool; return it to the pool */
	if (iram.start)
		gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);

	/* vpu_major > 0 means the chardev/class pair was registered */
	if (vpu_major > 0) {
		device_destroy(vpu_class, MKDEV(vpu_major, 0));
		class_destroy(vpu_class);
		unregister_chrdev(vpu_major, "mxc_vpu");
		vpu_major = 0;
	}

	/* Release the DMA buffers shared with userspace via the chardev */
	vpu_free_dma_buffer(&bitwork_mem);
	vpu_free_dma_buffer(&pic_para_mem);
	vpu_free_dma_buffer(&user_data_mem);

	/* reset VPU state: the block must be powered and clocked for the
	 * reset to take effect, so bracket it explicitly */
	vpu_power_up();
	vpu_clk_enable(vpu_data);
	vpu_reset();
	vpu_clk_disable(vpu_data);
	vpu_power_down();

	clk_put(vpu_clk);
	return 0;
}
945
946 #ifdef CONFIG_PM_SLEEP
947 static int vpu_suspend(struct device *dev)
948 {
949         struct vpu_priv *vpu_data = dev_get_drvdata(dev);
950         unsigned long timeout;
951
952         mutex_lock(&vpu_data->lock);
953
954         if (open_count) {
955                 /* Wait for vpu go to idle state, suspect vpu cannot be changed
956                  * to idle state after about 1 sec
957                  */
958                 timeout = jiffies + HZ;
959                 while (READ_REG(BIT_BUSY_FLAG)) {
960                         msleep(1);
961                         if (time_after(jiffies, timeout)) {
962                                 mutex_unlock(&vpu_data->lock);
963                                 return -EAGAIN;
964                         }
965                 }
966
967                 if (vpu_data->soc_data->is_mx53) {
968                         mutex_unlock(&vpu_data->lock);
969                         return 0;
970                 }
971
972                 if (bitwork_mem.cpu_addr != 0) {
973                         int i;
974
975                         /* Save 64 registers from BIT_CODE_BUF_ADDR */
976                         for (i = 0; i < 64; i++)
977                                 regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
978                         pc_before_suspend = READ_REG(BIT_CUR_PC);
979                 }
980
981                 vpu_clk_disable(vpu_data);
982                 /* If VPU is working before suspend, disable
983                  * regulator to make usecount right.
984                  */
985                 vpu_power_down();
986         }
987
988         mutex_unlock(&vpu_data->lock);
989         return 0;
990 }
991
992 static int vpu_resume(struct device *dev)
993 {
994         int i;
995         struct vpu_priv *vpu_data = dev_get_drvdata(dev);
996
997         mutex_lock(&vpu_data->lock);
998
999         if (open_count) {
1000                 if (vpu_data->soc_data->is_mx53) {
1001                         vpu_clk_enable(vpu_data);
1002                         goto out;
1003                 }
1004
1005                 /* If VPU is working before suspend, enable
1006                  * regulator to make usecount right.
1007                  */
1008                 vpu_power_up();
1009
1010                 if (bitwork_mem.cpu_addr != NULL) {
1011                         u32 *p = bitwork_mem.cpu_addr;
1012                         u32 data, pc;
1013                         u16 data_hi;
1014                         u16 data_lo;
1015
1016                         vpu_clk_enable(vpu_data);
1017
1018                         pc = READ_REG(BIT_CUR_PC);
1019                         if (pc) {
1020                                 dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);
1021                                 goto out;
1022                         }
1023
1024                         /* Restore registers */
1025                         for (i = 0; i < 64; i++)
1026                                 WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));
1027
1028                         WRITE_REG(0x0, BIT_RESET_CTRL);
1029                         WRITE_REG(0x0, BIT_CODE_RUN);
1030
1031                         /* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
1032                         if (vpu_data->soc_data->quirk_subblk_en)
1033                                 WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
1034
1035                         /*
1036                          * Re-load boot code, from the codebuffer in external RAM.
1037                          * Thankfully, we only need 4096 bytes, same for all platforms.
1038                          */
1039                         for (i = 0; i < 2048; i += 4) {
1040                                 data = p[(i / 2) + 1];
1041                                 data_hi = (data >> 16) & 0xFFFF;
1042                                 data_lo = data & 0xFFFF;
1043                                 WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
1044                                 WRITE_REG(((i + 1) << 16) | data_lo,
1045                                                 BIT_CODE_DOWN);
1046
1047                                 data = p[i / 2];
1048                                 data_hi = (data >> 16) & 0xFFFF;
1049                                 data_lo = data & 0xFFFF;
1050                                 WRITE_REG(((i + 2) << 16) | data_hi,
1051                                                 BIT_CODE_DOWN);
1052                                 WRITE_REG(((i + 3) << 16) | data_lo,
1053                                                 BIT_CODE_DOWN);
1054                         }
1055
1056                         if (pc_before_suspend) {
1057                                 WRITE_REG(0x1, BIT_BUSY_FLAG);
1058                                 WRITE_REG(0x1, BIT_CODE_RUN);
1059                                 while (READ_REG(BIT_BUSY_FLAG))
1060                                         ;
1061                         } else {
1062                                 dev_warn(vpu_dev, "PC=0 before suspend\n");
1063                         }
1064                 }
1065         }
1066 out:
1067         mutex_unlock(&vpu_data->lock);
1068         return 0;
1069 }
1070
/* System-sleep hooks only (no runtime-PM callbacks here); when
 * CONFIG_PM_SLEEP is disabled the driver registers no PM ops at all. */
static SIMPLE_DEV_PM_OPS(vpu_pm_ops, vpu_suspend, vpu_resume);
#define VPU_PM_OPS &vpu_pm_ops
#else
#define VPU_PM_OPS NULL
#endif /* !CONFIG_PM_SLEEP */
1076
/*!
 * Platform driver definition.  Binding is by device tree via
 * vpu_of_match (defined earlier in this file, outside this view) or by
 * the legacy platform-device name "mxc_vpu".
 */
static struct platform_driver mxcvpu_driver = {
	.driver = {
		.name = "mxc_vpu",
		.of_match_table = vpu_of_match,
		.pm = VPU_PM_OPS,
	},
	.probe = vpu_dev_probe,
	.remove = vpu_dev_remove,
};
1089
/* Registers the driver and generates the module init/exit boilerplate */
module_platform_driver(mxcvpu_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
MODULE_LICENSE("GPL");