/*
 * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
 */

/*
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

/*!
 * @file mxc_vpu.c
 *
 * @brief VPU system initialization and file operation implementation
 *
 * @ingroup VPU
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/stat.h>
#include <linux/platform_device.h>
#include <linux/kdev_t.h>
#include <linux/dma-mapping.h>
#include <linux/iram_alloc.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fsl_devices.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>
#include <linux/page-flags.h>
#include <linux/mm_types.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <mach/busfreq.h>
#include <mach/hardware.h>
#include <mach/common.h>
#endif
#include <asm/page.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
#include <linux/sizes.h>
#else
#include <asm/sizes.h>
#endif
#include <mach/clock.h>
#include <mach/hardware.h>

#include <mach/mxc_vpu.h>
/* Define a new pgprot which combines uncached and XN (never executable) */
#define pgprot_noncachedxn(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)

struct vpu_priv {
        struct fasync_struct *async_queue;
        struct work_struct work;
        struct workqueue_struct *workqueue;
        struct mutex lock;
};

/* To track the allocated memory buffer */
struct memalloc_record {
        struct list_head list;
        struct vpu_mem_desc mem;
};

struct iram_setting {
        u32 start;
        u32 end;
};

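/* Records of buffers handed out via VPU_IOC_PHYMEM_ALLOC;
 * guarded by vpu_data.lock. */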
static LIST_HEAD(head);

static int vpu_major;
static int vpu_clk_usercount;
static struct class *vpu_class;
static struct vpu_priv vpu_data;
static u8 open_count;
static struct clk *vpu_clk;
static struct vpu_mem_desc bitwork_mem = { 0 };
static struct vpu_mem_desc pic_para_mem = { 0 };
static struct vpu_mem_desc user_data_mem = { 0 };
static struct vpu_mem_desc share_mem = { 0 };
static struct vpu_mem_desc vshare_mem = { 0 };

static void __iomem *vpu_base;
static int vpu_ipi_irq;
static u32 phy_vpu_base_addr;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
static phys_addr_t top_address_DRAM;
static struct mxc_vpu_platform_data *vpu_plat;
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
static struct platform_device *vpu_pdev;
#endif

/* IRAM setting */
static struct iram_setting iram;

/* implement the blocking ioctl */
static int irq_status;
static int codec_done;
static wait_queue_head_t vpu_queue;

#ifdef CONFIG_SOC_IMX6Q
#define MXC_VPU_HAS_JPU
#endif

#ifdef MXC_VPU_HAS_JPU
static int vpu_jpu_irq;
#endif

static unsigned int regBk[64];
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
static struct regulator *vpu_regulator;
#endif
static unsigned int pc_before_suspend;
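/*
 * Balance of clock enables requested from user space through
 * VPU_IOC_CLKGATE_SETTING; release and suspend use this count to undo
 * any enables that are still outstanding.
 */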
static atomic_t clk_cnt_from_ioc = ATOMIC_INIT(0);

#define READ_REG(x)             readl_relaxed(vpu_base + x)
#define WRITE_REG(val, x)       writel_relaxed(val, vpu_base + x)
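/*
 * The relaxed accessors above omit the memory barriers of readl()/
 * writel(); register accesses are then only ordered against each
 * other, which is assumed sufficient for the polling and IRQ
 * handling done in this driver.
 */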

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
/* redirect to static functions */
static int cpu_is_mx6dl(void)
{
        return of_machine_is_compatible("fsl,imx6dl");
}

static int cpu_is_mx6q(void)
{
        return of_machine_is_compatible("fsl,imx6q");
}
#endif

/*!
 * Private function to alloc dma buffer
 * @return  0 on success
 */
static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
{
        mem->cpu_addr = (unsigned long)
            dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
                               (dma_addr_t *) (&mem->phy_addr),
                               GFP_DMA | GFP_KERNEL);
        pr_debug("[ALLOC] mem alloc cpu_addr = 0x%x\n", mem->cpu_addr);
        if ((void *)(mem->cpu_addr) == NULL) {
                printk(KERN_ERR "Physical memory allocation error!\n");
                return -1;
        }
        return 0;
}

/*!
 * Private function to free dma buffer
 */
static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
{
        if (mem->cpu_addr != 0) {
                dma_free_coherent(0, PAGE_ALIGN(mem->size),
                                  (void *)mem->cpu_addr, mem->phy_addr);
        }
}

/*!
 * Private function to free buffers
 * @return  0 on success
 */
static int vpu_free_buffers(void)
{
        struct memalloc_record *rec, *n;
        struct vpu_mem_desc mem;

        list_for_each_entry_safe(rec, n, &head, list) {
                mem = rec->mem;
                if (mem.cpu_addr != 0) {
                        vpu_free_dma_buffer(&mem);
                        pr_debug("[FREE] freed paddr=0x%08X\n", mem.phy_addr);
                        /* delete from list */
                        list_del(&rec->list);
                        kfree(rec);
                }
        }

        return 0;
}

static inline void vpu_worker_callback(struct work_struct *w)
{
        struct vpu_priv *dev = container_of(w, struct vpu_priv,
                                work);

        if (dev->async_queue)
                kill_fasync(&dev->async_queue, SIGIO, POLL_IN);

        irq_status = 1;
        /*
         * The clock is gated on when dec/enc starts; gate it off once
         * the codec is done.
         */
        if (codec_done)
                codec_done = 0;

        wake_up_interruptible(&vpu_queue);
}

/*!
 * @brief vpu interrupt handler
 */
static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
{
        struct vpu_priv *dev = dev_id;
        unsigned long reg;

        reg = READ_REG(BIT_INT_REASON);
        if (reg & 0x8)
                codec_done = 1;
        WRITE_REG(0x1, BIT_INT_CLEAR);

        queue_work(dev->workqueue, &dev->work);

        return IRQ_HANDLED;
}

/*!
 * @brief vpu jpu interrupt handler
 */
#ifdef MXC_VPU_HAS_JPU
static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
{
        struct vpu_priv *dev = dev_id;
        unsigned long reg;

        reg = READ_REG(MJPEG_PIC_STATUS_REG);
        if (reg & 0x3)
                codec_done = 1;

        queue_work(dev->workqueue, &dev->work);

        return IRQ_HANDLED;
}
#endif

/*!
 * @brief Check whether a physical memory address about to be passed to
 * the VPU is valid. Passing a wrong address (such as a virtual
 * address) to the VPU can hang the system.
 *
 * @return true if the address is a valid physical memory address,
 * false if it is not.
 */
bool vpu_is_valid_phy_memory(u32 paddr)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
        if (paddr > top_address_DRAM)
                return false;
#endif

        return true;
}

/*!
 * @brief open function for vpu file operation
 *
 * @return  0 on success or negative error code on error
 */
static int vpu_open(struct inode *inode, struct file *filp)
{
        mutex_lock(&vpu_data.lock);

        if (open_count++ == 0) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                if (!IS_ERR(vpu_regulator))
                        regulator_enable(vpu_regulator);
#else
                pm_runtime_get_sync(&vpu_pdev->dev);
                imx_gpc_power_up_pu(true);
#endif

#ifdef CONFIG_SOC_IMX6Q
                clk_prepare(vpu_clk);
                clk_enable(vpu_clk);
                if (READ_REG(BIT_CUR_PC))
                        pr_debug("VPU was not powered off before open!\n");
                clk_disable(vpu_clk);
                clk_unprepare(vpu_clk);
#endif
        }

        filp->private_data = (void *)(&vpu_data);
        mutex_unlock(&vpu_data.lock);
        return 0;
}

/*!
 * @brief IO ctrl function for vpu file operation
 * @param cmd IO ctrl command
 * @return  0 on success or negative error code on error
 */
static long vpu_ioctl(struct file *filp, u_int cmd,
                     u_long arg)
{
        int ret = 0;

        switch (cmd) {
        case VPU_IOC_PHYMEM_ALLOC:
                {
                        struct memalloc_record *rec;

                        rec = kzalloc(sizeof(*rec), GFP_KERNEL);
                        if (!rec)
                                return -ENOMEM;

                        ret = copy_from_user(&(rec->mem),
                                             (struct vpu_mem_desc *)arg,
                                             sizeof(struct vpu_mem_desc));
                        if (ret) {
                                kfree(rec);
                                return -EFAULT;
                        }

                        pr_debug("[ALLOC] mem alloc size = 0x%x\n",
                                 rec->mem.size);

                        ret = vpu_alloc_dma_buffer(&(rec->mem));
                        if (ret == -1) {
                                kfree(rec);
                                printk(KERN_ERR
                                       "Physical memory allocation error!\n");
                                break;
                        }
                        ret = copy_to_user((void __user *)arg, &(rec->mem),
                                           sizeof(struct vpu_mem_desc));
                        if (ret) {
                                /* don't leak the DMA buffer on failure */
                                vpu_free_dma_buffer(&rec->mem);
                                kfree(rec);
                                ret = -EFAULT;
                                break;
                        }

                        mutex_lock(&vpu_data.lock);
                        list_add(&rec->list, &head);
                        mutex_unlock(&vpu_data.lock);

                        break;
                }
        case VPU_IOC_PHYMEM_FREE:
                {
                        struct memalloc_record *rec, *n;
                        struct vpu_mem_desc vpu_mem;

                        ret = copy_from_user(&vpu_mem,
                                             (struct vpu_mem_desc *)arg,
                                             sizeof(struct vpu_mem_desc));
                        if (ret)
                                return -EFAULT;

                        pr_debug("[FREE] mem freed cpu_addr = 0x%x\n",
                                 vpu_mem.cpu_addr);

                        /*
                         * Only free buffers this driver allocated: look the
                         * descriptor up in the allocation list instead of
                         * trusting the user-supplied addresses directly.
                         */
                        mutex_lock(&vpu_data.lock);
                        list_for_each_entry_safe(rec, n, &head, list) {
                                if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
                                        vpu_free_dma_buffer(&rec->mem);
                                        /* delete from list */
                                        list_del(&rec->list);
                                        kfree(rec);
                                        break;
                                }
                        }
                        mutex_unlock(&vpu_data.lock);

                        break;
                }
        case VPU_IOC_WAIT4INT:
                {
                        u_long timeout = (u_long) arg;
                        if (!wait_event_interruptible_timeout
                            (vpu_queue, irq_status != 0,
                             msecs_to_jiffies(timeout))) {
                                printk(KERN_WARNING "VPU blocking: timeout.\n");
                                ret = -ETIME;
                        } else if (signal_pending(current)) {
                                printk(KERN_WARNING
                                       "VPU wait interrupted by signal.\n");
                                ret = -ERESTARTSYS;
                        } else
                                irq_status = 0;
                        break;
                }
        case VPU_IOC_IRAM_SETTING:
                {
                        ret = copy_to_user((void __user *)arg, &iram,
                                           sizeof(struct iram_setting));
                        if (ret)
                                ret = -EFAULT;

                        break;
                }
        case VPU_IOC_CLKGATE_SETTING:
                {
                        u32 clkgate_en;

                        if (get_user(clkgate_en, (u32 __user *) arg))
                                return -EFAULT;

                        if (clkgate_en) {
                                clk_prepare(vpu_clk);
                                clk_enable(vpu_clk);
                                atomic_inc(&clk_cnt_from_ioc);
                        } else {
                                clk_disable(vpu_clk);
                                clk_unprepare(vpu_clk);
                                atomic_dec(&clk_cnt_from_ioc);
                        }

                        break;
                }
        case VPU_IOC_GET_SHARE_MEM:
                {
                        mutex_lock(&vpu_data.lock);
                        if (share_mem.cpu_addr != 0) {
                                ret = copy_to_user((void __user *)arg,
                                                   &share_mem,
                                                   sizeof(struct vpu_mem_desc));
                                mutex_unlock(&vpu_data.lock);
                                break;
                        } else {
                                if (copy_from_user(&share_mem,
                                                   (struct vpu_mem_desc *)arg,
                                                   sizeof(struct vpu_mem_desc))) {
                                        mutex_unlock(&vpu_data.lock);
                                        return -EFAULT;
                                }
                                if (vpu_alloc_dma_buffer(&share_mem) == -1)
                                        ret = -EFAULT;
                                else {
                                        if (copy_to_user((void __user *)arg,
                                                         &share_mem,
                                                         sizeof(struct
                                                                vpu_mem_desc)))
                                                ret = -EFAULT;
                                }
                        }
                        mutex_unlock(&vpu_data.lock);
                        break;
                }
        case VPU_IOC_REQ_VSHARE_MEM:
                {
                        mutex_lock(&vpu_data.lock);
                        if (vshare_mem.cpu_addr != 0) {
                                ret = copy_to_user((void __user *)arg,
                                                   &vshare_mem,
                                                   sizeof(struct vpu_mem_desc));
                                mutex_unlock(&vpu_data.lock);
                                break;
                        } else {
                                if (copy_from_user(&vshare_mem,
                                                   (struct vpu_mem_desc *)arg,
                                                   sizeof(struct
                                                          vpu_mem_desc))) {
                                        mutex_unlock(&vpu_data.lock);
                                        return -EFAULT;
                                }
                                /* vmalloc shared memory if not allocated */
                                if (!vshare_mem.cpu_addr)
                                        vshare_mem.cpu_addr =
                                            (unsigned long)
                                            vmalloc_user(vshare_mem.size);
                                if (copy_to_user
                                     ((void __user *)arg, &vshare_mem,
                                     sizeof(struct vpu_mem_desc)))
                                        ret = -EFAULT;
                        }
                        mutex_unlock(&vpu_data.lock);
                        break;
                }
        case VPU_IOC_GET_WORK_ADDR:
                {
                        if (bitwork_mem.cpu_addr != 0) {
                                ret =
                                    copy_to_user((void __user *)arg,
                                                 &bitwork_mem,
                                                 sizeof(struct vpu_mem_desc));
                                break;
                        } else {
                                if (copy_from_user(&bitwork_mem,
                                                   (struct vpu_mem_desc *)arg,
                                                   sizeof(struct vpu_mem_desc)))
                                        return -EFAULT;

                                if (vpu_alloc_dma_buffer(&bitwork_mem) == -1)
                                        ret = -EFAULT;
                                else if (copy_to_user((void __user *)arg,
                                                      &bitwork_mem,
                                                      sizeof(struct
                                                             vpu_mem_desc)))
                                        ret = -EFAULT;
                        }
                        break;
                }
        /*
         * The following two ioctls are used when the user allocates a
         * working buffer and registers it with the vpu driver.
         */
        case VPU_IOC_QUERY_BITWORK_MEM:
                {
                        if (copy_to_user((void __user *)arg,
                                         &bitwork_mem,
                                         sizeof(struct vpu_mem_desc)))
                                ret = -EFAULT;
                        break;
                }
        case VPU_IOC_SET_BITWORK_MEM:
                {
                        if (copy_from_user(&bitwork_mem,
                                           (struct vpu_mem_desc *)arg,
                                           sizeof(struct vpu_mem_desc)))
                                ret = -EFAULT;
                        break;
                }
        case VPU_IOC_SYS_SW_RESET:
                {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
                        imx_src_reset_vpu();
#else
                        if (vpu_plat->reset)
                                vpu_plat->reset();
#endif

                        break;
                }
        case VPU_IOC_REG_DUMP:
                break;
        case VPU_IOC_PHYMEM_DUMP:
                break;
        case VPU_IOC_PHYMEM_CHECK:
        {
                struct vpu_mem_desc check_memory;
                ret = copy_from_user(&check_memory,
                                     (void __user *)arg,
                                     sizeof(struct vpu_mem_desc));
                if (ret != 0) {
                        printk(KERN_ERR "copy from user failure:%d\n", ret);
                        ret = -EFAULT;
                        break;
                }
                ret = vpu_is_valid_phy_memory((u32)check_memory.phy_addr);

                pr_debug("vpu: memory phy:0x%x %s phy memory\n",
                       check_memory.phy_addr, (ret ? "is" : "isn't"));
                /* borrow .size to pass back the result. */
                check_memory.size = ret;
                ret = copy_to_user((void __user *)arg, &check_memory,
                                   sizeof(struct vpu_mem_desc));
                if (ret) {
                        ret = -EFAULT;
                        break;
                }
                break;
        }
        case VPU_IOC_LOCK_DEV:
                {
                        u32 lock_en;

                        if (get_user(lock_en, (u32 __user *) arg))
                                return -EFAULT;

                        if (lock_en)
                                mutex_lock(&vpu_data.lock);
                        else
                                mutex_unlock(&vpu_data.lock);

                        break;
                }
        default:
                {
                        printk(KERN_ERR "No such IOCTL, cmd is %u\n", cmd);
                        ret = -EINVAL;
                        break;
                }
        }
        return ret;
}
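
/*
 * Minimal user-space usage sketch for the ioctl interface above
 * (illustration only; error handling omitted, and the 1 MiB size and
 * 1000 ms timeout are arbitrary example values):
 *
 *   int fd = open("/dev/mxc_vpu", O_RDWR);
 *   struct vpu_mem_desc mem = { .size = 0x100000 };
 *   u32 on = 1, off = 0;
 *
 *   ioctl(fd, VPU_IOC_PHYMEM_ALLOC, &mem);    // fills phy_addr/cpu_addr
 *   ioctl(fd, VPU_IOC_CLKGATE_SETTING, &on);  // ungate the VPU clock
 *   ioctl(fd, VPU_IOC_WAIT4INT, 1000);        // wait up to 1000 ms
 *   ioctl(fd, VPU_IOC_CLKGATE_SETTING, &off); // gate the clock again
 *   ioctl(fd, VPU_IOC_PHYMEM_FREE, &mem);
 *   close(fd);
 */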

/*!
 * @brief Release function for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_release(struct inode *inode, struct file *filp)
{
        int i;
        unsigned long timeout;

        mutex_lock(&vpu_data.lock);

        if (open_count > 0 && !(--open_count)) {

                /* Wait for the VPU to go idle */
                clk_prepare(vpu_clk);
                clk_enable(vpu_clk);
                if (READ_REG(BIT_CUR_PC)) {

                        timeout = jiffies + HZ;
                        while (READ_REG(BIT_BUSY_FLAG)) {
                                msleep(1);
                                if (time_after(jiffies, timeout)) {
                                        printk(KERN_WARNING "VPU timeout during release\n");
                                        break;
                                }
                        }
                        clk_disable(vpu_clk);
                        clk_unprepare(vpu_clk);

                        /* Clean up interrupt */
                        cancel_work_sync(&vpu_data.work);
                        flush_workqueue(vpu_data.workqueue);
                        irq_status = 0;

                        clk_prepare(vpu_clk);
                        clk_enable(vpu_clk);
                        if (READ_REG(BIT_BUSY_FLAG)) {

                                if (cpu_is_mx51() || cpu_is_mx53()) {
                                        printk(KERN_ERR
                                                "fatal error: can't gate/power off when VPU is busy\n");
                                        clk_disable(vpu_clk);
                                        clk_unprepare(vpu_clk);
                                        mutex_unlock(&vpu_data.lock);
                                        return -EFAULT;
                                }

#ifdef CONFIG_SOC_IMX6Q
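                                /*
                                 * On i.MX6, ask the VPU firmware to quiesce
                                 * before gating power: write 0x11 to register
                                 * 0x10F0 and poll 0x10F4 for the 0x77 ack.
                                 * (These handshake registers are not publicly
                                 * documented; semantics inferred from this
                                 * code.)
                                 */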
                                if (cpu_is_mx6dl() || cpu_is_mx6q()) {
                                        WRITE_REG(0x11, 0x10F0);
                                        timeout = jiffies + HZ;
                                        while (READ_REG(0x10F4) != 0x77) {
                                                msleep(1);
                                                if (time_after(jiffies, timeout))
                                                        break;
                                        }

                                        if (READ_REG(0x10F4) != 0x77) {
                                                printk(KERN_ERR
                                                        "fatal error: can't gate/power off when VPU is busy\n");
                                                WRITE_REG(0x0, 0x10F0);
                                                clk_disable(vpu_clk);
                                                clk_unprepare(vpu_clk);
                                                mutex_unlock(&vpu_data.lock);
                                                return -EFAULT;
                                        } else {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
                                                imx_src_reset_vpu();
#else
                                                if (vpu_plat->reset)
                                                        vpu_plat->reset();
#endif
                                        }
                                }
#endif
                        }
                }
                clk_disable(vpu_clk);
                clk_unprepare(vpu_clk);

                vpu_free_buffers();

                /* Free shared memory when vpu device is idle */
                vpu_free_dma_buffer(&share_mem);
                share_mem.cpu_addr = 0;
                vfree((void *)vshare_mem.cpu_addr);
                vshare_mem.cpu_addr = 0;

                vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
                for (i = 0; i < vpu_clk_usercount; i++) {
                        clk_disable(vpu_clk);
                        clk_unprepare(vpu_clk);
                        atomic_dec(&clk_cnt_from_ioc);
                }

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                if (!IS_ERR(vpu_regulator))
                        regulator_disable(vpu_regulator);
#else
                imx_gpc_power_up_pu(false);
                pm_runtime_put_sync_suspend(&vpu_pdev->dev);
#endif

        }
        mutex_unlock(&vpu_data.lock);

        return 0;
}

/*!
 * @brief fasync function for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_fasync(int fd, struct file *filp, int mode)
{
        struct vpu_priv *dev = (struct vpu_priv *)filp->private_data;
        return fasync_helper(fd, filp, mode, &dev->async_queue);
}

/*!
 * @brief memory map function of hardware registers for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
{
        unsigned long pfn;

        vm->vm_flags |= VM_IO | VM_RESERVED;
        /*
         * The vpu registers were mapped with ioremap() at probe time,
         * which sets L_PTE_XN, and the same physical address must
         * always be mapped with the same memory type, so set L_PTE_XN
         * here as well. Otherwise the video codec may misbehave.
         */
        vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
        pfn = phy_vpu_base_addr >> PAGE_SHIFT;
        pr_debug("size=0x%x,  page no.=0x%x\n",
                 (int)(vm->vm_end - vm->vm_start), (int)pfn);
        return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end - vm->vm_start,
                               vm->vm_page_prot) ? -EAGAIN : 0;
}

/*!
 * @brief memory map function of memory for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
{
        int request_size;
        request_size = vm->vm_end - vm->vm_start;

        pr_debug(" start=0x%x, pgoff=0x%x, size=0x%x\n",
                 (unsigned int)(vm->vm_start), (unsigned int)(vm->vm_pgoff),
                 request_size);

        vm->vm_flags |= VM_IO | VM_RESERVED;
        vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);

        return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
                               request_size, vm->vm_page_prot) ? -EAGAIN : 0;
}

/*!
 * @brief memory map function of vmalloced share memory
 * @return  0 on success or negative error code on error
 */
static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
{
        int ret;

        ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
        vm->vm_flags |= VM_IO;

        return ret;
}

/*!
 * @brief memory map interface for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
{
        unsigned long offset;

        offset = vshare_mem.cpu_addr >> PAGE_SHIFT;

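        /*
         * Dispatch on the mmap offset: the pfn of the vmalloc'ed share
         * memory selects the vshare mapping, any other non-zero pfn is
         * treated as a physical DMA buffer, and offset zero maps the
         * VPU hardware registers.
         */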
        if (vm->vm_pgoff && (vm->vm_pgoff == offset))
                return vpu_map_vshare_mem(fp, vm);
        else if (vm->vm_pgoff)
                return vpu_map_dma_mem(fp, vm);
        else
                return vpu_map_hwregs(fp, vm);
}

const struct file_operations vpu_fops = {
        .owner = THIS_MODULE,
        .open = vpu_open,
        .unlocked_ioctl = vpu_ioctl,
        .release = vpu_release,
        .fasync = vpu_fasync,
        .mmap = vpu_mmap,
};
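
/* These entry points back the character device created by
 * vpu_dev_probe() as /dev/mxc_vpu (class "mxc_vpu", minor 0). */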

/*!
 * This function is called by the driver framework to initialize the vpu device.
 * @param   dev The device structure for the vpu passed in by the framework.
 * @return   0 on success or negative error code on error
 */
static int vpu_dev_probe(struct platform_device *pdev)
{
        int err = 0;
        struct device *temp_class;
        struct resource *res;
        unsigned long addr = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        struct device_node *np = pdev->dev.of_node;
        u32 iramsize;

        err = of_property_read_u32(np, "iramsize", (u32 *)&iramsize);
        if (!err && iramsize)
                iram_alloc(iramsize, &addr);
        if (addr == 0)
                iram.start = iram.end = 0;
        else {
                iram.start = addr;
                iram.end = addr + iramsize - 1;
        }

        vpu_pdev = pdev;
#else
        vpu_plat = pdev->dev.platform_data;

        if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
                iram_alloc(vpu_plat->iram_size, &addr);
        if (addr == 0)
                iram.start = iram.end = 0;
        else {
                iram.start = addr;
                iram.end = addr + vpu_plat->iram_size - 1;
        }
#endif

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
        if (!res) {
                printk(KERN_ERR "vpu: unable to get vpu base addr\n");
                return -ENODEV;
        }
        phy_vpu_base_addr = res->start;
        vpu_base = ioremap(res->start, resource_size(res));
        if (!vpu_base) {
                printk(KERN_ERR "vpu: unable to map vpu registers\n");
                return -ENOMEM;
        }

        vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
        if (vpu_major < 0) {
                printk(KERN_ERR "vpu: unable to get a major for VPU\n");
                err = -EBUSY;
                goto error;
        }

        vpu_class = class_create(THIS_MODULE, "mxc_vpu");
        if (IS_ERR(vpu_class)) {
                err = PTR_ERR(vpu_class);
                goto err_out_chrdev;
        }

        temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
                                   NULL, "mxc_vpu");
        if (IS_ERR(temp_class)) {
                err = PTR_ERR(temp_class);
                goto err_out_class;
        }

        vpu_clk = clk_get(&pdev->dev, "vpu_clk");
        if (IS_ERR(vpu_clk)) {
                err = -ENOENT;
                goto err_out_class;
        }

        vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
        if (vpu_ipi_irq < 0) {
                printk(KERN_ERR "vpu: unable to get vpu interrupt\n");
                err = -ENXIO;
                goto err_out_class;
        }
        err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
                          (void *)(&vpu_data));
        if (err)
                goto err_out_class;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
        vpu_regulator = regulator_get(NULL, "cpu_vddvpu");
        if (IS_ERR(vpu_regulator)) {
                if (!(cpu_is_mx51() || cpu_is_mx53())) {
                        printk(KERN_ERR
                                "%s: failed to get vpu regulator\n", __func__);
                        err = PTR_ERR(vpu_regulator);
                        free_irq(vpu_ipi_irq, &vpu_data);
                        goto err_out_class;
                } else {
                        /* regulator_get returns an error on MX5x;
                         * just ignore it there */
                        printk(KERN_WARNING
                                "%s: failed to get vpu regulator\n", __func__);
                }
        }
#endif

#ifdef MXC_VPU_HAS_JPU
        vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
        if (vpu_jpu_irq < 0) {
                printk(KERN_ERR "vpu: unable to get vpu jpu interrupt\n");
                err = -ENXIO;
                free_irq(vpu_ipi_irq, &vpu_data);
                goto err_out_class;
        }
        err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
                          "VPU_JPG_IRQ", (void *)(&vpu_data));
        if (err) {
                free_irq(vpu_ipi_irq, &vpu_data);
                goto err_out_class;
        }
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        pm_runtime_enable(&pdev->dev);
#endif

        vpu_data.workqueue = create_workqueue("vpu_wq");
        INIT_WORK(&vpu_data.work, vpu_worker_callback);
        mutex_init(&vpu_data.lock);
        printk(KERN_INFO "VPU initialized\n");
        goto out;

err_out_class:
        device_destroy(vpu_class, MKDEV(vpu_major, 0));
        class_destroy(vpu_class);
err_out_chrdev:
        unregister_chrdev(vpu_major, "mxc_vpu");
error:
        iounmap(vpu_base);
out:
        return err;
}

static int vpu_dev_remove(struct platform_device *pdev)
{
        free_irq(vpu_ipi_irq, &vpu_data);
#ifdef MXC_VPU_HAS_JPU
        free_irq(vpu_jpu_irq, &vpu_data);
#endif
        cancel_work_sync(&vpu_data.work);
        flush_workqueue(vpu_data.workqueue);
        destroy_workqueue(vpu_data.workqueue);

        iounmap(vpu_base);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        if (iram.start)
                iram_free(iram.start, iram.end - iram.start + 1);
#else
        if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
                iram_free(iram.start, vpu_plat->iram_size);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
        if (!IS_ERR(vpu_regulator))
                regulator_put(vpu_regulator);
#endif
        return 0;
}

#ifdef CONFIG_PM
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
static int vpu_suspend(struct device *dev)
#else
static int vpu_suspend(struct platform_device *pdev, pm_message_t state)
#endif
{
        int i;
        unsigned long timeout;

        mutex_lock(&vpu_data.lock);
        if (open_count == 0) {
                /* VPU is released (all instances are freed),
                 * clock is already off, context is no longer needed,
                 * power is already off on MX6,
                 * gate power on MX51 */
                if (cpu_is_mx51()) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                        if (vpu_plat->pg)
                                vpu_plat->pg(1);
#endif
                }
        } else {
                /* Wait for the VPU to go idle; give up if it is still
                   busy after about 1 sec */
                timeout = jiffies + HZ;
                clk_prepare(vpu_clk);
                clk_enable(vpu_clk);
                while (READ_REG(BIT_BUSY_FLAG)) {
                        msleep(1);
                        if (time_after(jiffies, timeout)) {
                                clk_disable(vpu_clk);
                                clk_unprepare(vpu_clk);
                                mutex_unlock(&vpu_data.lock);
                                return -EAGAIN;
                        }
                }
                clk_disable(vpu_clk);
                clk_unprepare(vpu_clk);

                /* Make sure the clock is disabled before suspend */
                vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
                for (i = 0; i < vpu_clk_usercount; i++) {
                        clk_disable(vpu_clk);
                        clk_unprepare(vpu_clk);
                }

                if (cpu_is_mx53()) {
                        mutex_unlock(&vpu_data.lock);
                        return 0;
                }

                if (bitwork_mem.cpu_addr != 0) {
                        clk_prepare(vpu_clk);
                        clk_enable(vpu_clk);
                        /* Save 64 registers from BIT_CODE_BUF_ADDR */
                        for (i = 0; i < 64; i++)
                                regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
                        pc_before_suspend = READ_REG(BIT_CUR_PC);
                        clk_disable(vpu_clk);
                        clk_unprepare(vpu_clk);
                }

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                if (vpu_plat->pg)
                        vpu_plat->pg(1);
#endif

                /* If the VPU was working before suspend, disable the
                 * regulator to keep its use count right. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                if (!IS_ERR(vpu_regulator))
                        regulator_disable(vpu_regulator);
#else
                imx_gpc_power_up_pu(false);
#endif
        }

        mutex_unlock(&vpu_data.lock);
        return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
static int vpu_resume(struct device *dev)
#else
static int vpu_resume(struct platform_device *pdev)
#endif
{
        int i;

        mutex_lock(&vpu_data.lock);
        if (open_count == 0) {
                /* VPU is released (all instances are freed),
                 * clock should be kept off, context is no longer needed,
                 * power should be kept off on MX6,
                 * disable power gating on MX51 */
                if (cpu_is_mx51()) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                        if (vpu_plat->pg)
                                vpu_plat->pg(0);
#endif
                }
        } else {
                if (cpu_is_mx53())
                        goto recover_clk;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                /* If the VPU was working before suspend, enable the
                 * regulator to keep its use count right. */
                if (!IS_ERR(vpu_regulator))
                        regulator_enable(vpu_regulator);

                if (vpu_plat->pg)
                        vpu_plat->pg(0);
#else
                imx_gpc_power_up_pu(true);
#endif

                if (bitwork_mem.cpu_addr != 0) {
                        u32 *p = (u32 *) bitwork_mem.cpu_addr;
                        u32 data, pc;
                        u16 data_hi;
                        u16 data_lo;

                        clk_prepare(vpu_clk);
                        clk_enable(vpu_clk);

                        pc = READ_REG(BIT_CUR_PC);
                        if (pc) {
                                printk(KERN_WARNING "VPU was not powered off after suspend (PC=0x%x)\n", pc);
                                clk_disable(vpu_clk);
                                clk_unprepare(vpu_clk);
                                goto recover_clk;
                        }

                        /* Restore registers */
                        for (i = 0; i < 64; i++)
                                WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));

                        WRITE_REG(0x0, BIT_RESET_CTRL);
                        WRITE_REG(0x0, BIT_CODE_RUN);
                        /* MX6 RTL has a bug: MBC_SET_SUBBLK_EN is not initialized on reset */
#ifdef CONFIG_SOC_IMX6Q
                        WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
#endif

                        /*
                         * Re-load the boot code from the code buffer in
                         * external RAM. Thankfully, we only need 4096 bytes,
                         * the same for all platforms.
                         */
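                        /*
                         * Each BIT_CODE_DOWN write carries one 16-bit word
                         * of boot code packed as (index << 16) | data; the
                         * 32-bit words of the code buffer are split into
                         * high and low halves below.
                         */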
                        for (i = 0; i < 2048; i += 4) {
                                data = p[(i / 2) + 1];
                                data_hi = (data >> 16) & 0xFFFF;
                                data_lo = data & 0xFFFF;
                                WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
                                WRITE_REG(((i + 1) << 16) | data_lo,
                                                BIT_CODE_DOWN);

                                data = p[i / 2];
                                data_hi = (data >> 16) & 0xFFFF;
                                data_lo = data & 0xFFFF;
                                WRITE_REG(((i + 2) << 16) | data_hi,
                                                BIT_CODE_DOWN);
                                WRITE_REG(((i + 3) << 16) | data_lo,
                                                BIT_CODE_DOWN);
                        }

                        if (pc_before_suspend) {
                                WRITE_REG(0x1, BIT_BUSY_FLAG);
                                WRITE_REG(0x1, BIT_CODE_RUN);
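                                /* Busy-wait (no timeout) for the BIT
                                 * processor to come back up. */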
                                while (READ_REG(BIT_BUSY_FLAG))
                                        ;
                        } else {
                                printk(KERN_WARNING "PC=0 before suspend\n");
                        }
                        clk_disable(vpu_clk);
                        clk_unprepare(vpu_clk);
                }

recover_clk:
                /* Recover the vpu clock */
                for (i = 0; i < vpu_clk_usercount; i++) {
                        clk_prepare(vpu_clk);
                        clk_enable(vpu_clk);
                }
        }

        mutex_unlock(&vpu_data.lock);
        return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
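/*
 * Runtime PM: the VPU only needs the high bus-frequency setpoint while
 * active, so request it on runtime resume and release it on runtime
 * suspend.
 */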
static int vpu_runtime_suspend(struct device *dev)
{
        release_bus_freq(BUS_FREQ_HIGH);
        return 0;
}

static int vpu_runtime_resume(struct device *dev)
{
        request_bus_freq(BUS_FREQ_HIGH);
        return 0;
}

static const struct dev_pm_ops vpu_pm_ops = {
        SET_RUNTIME_PM_OPS(vpu_runtime_suspend, vpu_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(vpu_suspend, vpu_resume)
};
#endif

#else
#define vpu_suspend     NULL
#define vpu_resume      NULL
#endif                          /* !CONFIG_PM */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
static const struct of_device_id vpu_of_match[] = {
        { .compatible = "fsl,imx6-vpu", },
        {/* sentinel */}
};
MODULE_DEVICE_TABLE(of, vpu_of_match);
#endif

/*! Driver definition
 *
 */
static struct platform_driver mxcvpu_driver = {
        .driver = {
                   .name = "mxc_vpu",
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
                   .of_match_table = vpu_of_match,
#ifdef CONFIG_PM
                   .pm = &vpu_pm_ops,
#endif
#endif
                   },
        .probe = vpu_dev_probe,
        .remove = vpu_dev_remove,
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
        .suspend = vpu_suspend,
        .resume = vpu_resume,
#endif
};

static int __init vpu_init(void)
{
        int ret;

        /* Initialize the wait queue before the driver can be bound */
        init_waitqueue_head(&vpu_queue);

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
        memblock_analyze();
        top_address_DRAM = memblock_end_of_DRAM_with_reserved();
#endif

        ret = platform_driver_register(&mxcvpu_driver);

        return ret;
}

static void __exit vpu_exit(void)
{
        if (vpu_major > 0) {
                device_destroy(vpu_class, MKDEV(vpu_major, 0));
                class_destroy(vpu_class);
                unregister_chrdev(vpu_major, "mxc_vpu");
                vpu_major = 0;
        }

        vpu_free_dma_buffer(&bitwork_mem);
        vpu_free_dma_buffer(&pic_para_mem);
        vpu_free_dma_buffer(&user_data_mem);

        /* reset VPU state */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
        if (!IS_ERR(vpu_regulator))
                regulator_enable(vpu_regulator);
        clk_prepare(vpu_clk);
        clk_enable(vpu_clk);
        if (vpu_plat->reset)
                vpu_plat->reset();
        clk_disable(vpu_clk);
        clk_unprepare(vpu_clk);
        if (!IS_ERR(vpu_regulator))
                regulator_disable(vpu_regulator);
#else
        imx_gpc_power_up_pu(true);
        clk_prepare(vpu_clk);
        clk_enable(vpu_clk);
        imx_src_reset_vpu();
        clk_disable(vpu_clk);
        clk_unprepare(vpu_clk);
        imx_gpc_power_up_pu(false);
#endif

        clk_put(vpu_clk);

        platform_driver_unregister(&mxcvpu_driver);
}

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
MODULE_LICENSE("GPL");

module_init(vpu_init);
module_exit(vpu_exit);