/*
 * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
 */

/*
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

/*!
 * @file mxc_vpu.c
 *
 * @brief VPU system initialization and file operation implementation
 *
 * @ingroup VPU
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/stat.h>
#include <linux/platform_device.h>
#include <linux/kdev_t.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fsl_devices.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>
#include <linux/page-flags.h>
#include <linux/mm_types.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/version.h>
#include <asm/page.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
#include <linux/iram_alloc.h>
#include <mach/clock.h>
#include <mach/hardware.h>
#include <mach/mxc_vpu.h>
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/mxc_vpu.h>
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
#include <mach/busfreq.h>
#include <mach/common.h>
#else
#include <asm/sizes.h>
#endif

/* Define a new pgprot value that combines uncached with XN (never executable) */
#define pgprot_noncachedxn(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)

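/* Driver-wide state shared by all openers of the VPU device node. */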
struct vpu_priv {
        struct fasync_struct *async_queue;
        struct work_struct work;
        struct workqueue_struct *workqueue;
        struct mutex lock;
};

/* Tracks an allocated memory buffer */
struct memalloc_record {
        struct list_head list;
        struct vpu_mem_desc mem;
};

struct iram_setting {
        u32 start;
        u32 end;
};

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
static struct gen_pool *iram_pool;
static u32 iram_base;
#endif

static LIST_HEAD(head);

static int vpu_major;
static int vpu_clk_usercount;
static struct class *vpu_class;
static struct vpu_priv vpu_data;
static u8 open_count;
static struct clk *vpu_clk;
static struct vpu_mem_desc bitwork_mem = { 0 };
static struct vpu_mem_desc pic_para_mem = { 0 };
static struct vpu_mem_desc user_data_mem = { 0 };
static struct vpu_mem_desc share_mem = { 0 };
static struct vpu_mem_desc vshare_mem = { 0 };

static void __iomem *vpu_base;
static int vpu_ipi_irq;
static u32 phy_vpu_base_addr;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
static phys_addr_t top_address_DRAM;
static struct mxc_vpu_platform_data *vpu_plat;
#endif

static struct device *vpu_dev;

/* IRAM setting */
static struct iram_setting iram;

/* implement the blocking ioctl */
static int irq_status;
static int codec_done;
static wait_queue_head_t vpu_queue;

#ifdef CONFIG_SOC_IMX6Q
#define MXC_VPU_HAS_JPU
#endif

#ifdef MXC_VPU_HAS_JPU
static int vpu_jpu_irq;
#endif

#ifdef CONFIG_PM
static unsigned int regBk[64];
static unsigned int pc_before_suspend;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
static struct regulator *vpu_regulator;
#endif
static atomic_t clk_cnt_from_ioc = ATOMIC_INIT(0);

#define READ_REG(x)             readl_relaxed(vpu_base + x)
#define WRITE_REG(val, x)       writel_relaxed(val, vpu_base + x)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
/* Map the legacy cpu_is_*() helpers onto device-tree compatible checks */
static int cpu_is_mx6dl(void)
{
        return of_machine_is_compatible("fsl,imx6dl");
}

static int cpu_is_mx6q(void)
{
        return of_machine_is_compatible("fsl,imx6q");
}
#endif

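/* Assert a hardware reset of the VPU, via whichever interface this
 * kernel version provides. */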
static void vpu_reset(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
        device_reset(vpu_dev);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        imx_src_reset_vpu();
#else
        if (vpu_plat->reset)
                vpu_plat->reset();
#endif
}

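/*
 * Acquire (on) or release (off) the VPU power regulator. A nonzero
 * return means the regulator could not be obtained.
 */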
static long vpu_power_get(bool on)
{
        long ret = 0;

        if (on) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                vpu_regulator = regulator_get(NULL, "cpu_vddvpu");
                ret = IS_ERR(vpu_regulator);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
                vpu_regulator = devm_regulator_get(vpu_dev, "pu");
                ret = IS_ERR(vpu_regulator);
#endif
        } else {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                if (!IS_ERR(vpu_regulator))
                        regulator_put(vpu_regulator);
#endif
        }
        return ret;
}

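/*
 * Switch VPU power on or off: through the regulator where one is in
 * use, otherwise through the i.MX6 GPC PU power domain.
 */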
static void vpu_power_up(bool on)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
        int ret = 0;

        if (on) {
                if (!IS_ERR(vpu_regulator)) {
                        ret = regulator_enable(vpu_regulator);
                        if (ret)
                                dev_err(vpu_dev, "failed to power up vpu\n");
                }
        } else {
                if (!IS_ERR(vpu_regulator)) {
                        ret = regulator_disable(vpu_regulator);
                        if (ret)
                                dev_err(vpu_dev, "failed to power down vpu\n");
                }
        }
#else
        imx_gpc_power_up_pu(on);
#endif
}

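/*
 * Stubs for interfaces that are unavailable on 3.10+: the busfreq
 * requests become no-ops, and the MX5x CPU checks report false since
 * this code path only runs on i.MX6.
 */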
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
static void request_bus_freq(int freq)
{
}

static void release_bus_freq(int freq)
{
}

static int cpu_is_mx53(void)
{
        return 0;
}

static int cpu_is_mx51(void)
{
        return 0;
}

#define VM_RESERVED 0
#define BUS_FREQ_HIGH 0

#endif

/*!
 * Private function to allocate a DMA buffer
 * @return  0 on success
 */
static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
{
        mem->cpu_addr = (unsigned long)
            dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
                               (dma_addr_t *) (&mem->phy_addr),
                               GFP_DMA | GFP_KERNEL);
        dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = 0x%x\n", mem->cpu_addr);
        if ((void *)(mem->cpu_addr) == NULL) {
                dev_err(vpu_dev, "Physical memory allocation error!\n");
                return -1;
        }
        return 0;
}

/*!
 * Private function to free a DMA buffer
 */
static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
{
        if (mem->cpu_addr != 0) {
                dma_free_coherent(NULL, PAGE_ALIGN(mem->size),
                                  (void *)mem->cpu_addr, mem->phy_addr);
        }
}

/*!
 * Private function to free all allocated buffers
 * @return  0 on success
 */
static int vpu_free_buffers(void)
{
        struct memalloc_record *rec, *n;
        struct vpu_mem_desc mem;

        list_for_each_entry_safe(rec, n, &head, list) {
                mem = rec->mem;
                if (mem.cpu_addr != 0) {
                        vpu_free_dma_buffer(&mem);
                        dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
                        /* delete from list */
                        list_del(&rec->list);
                        kfree(rec);
                }
        }

        return 0;
}

static inline void vpu_worker_callback(struct work_struct *w)
{
        struct vpu_priv *dev = container_of(w, struct vpu_priv,
                                work);

        if (dev->async_queue)
                kill_fasync(&dev->async_queue, SIGIO, POLL_IN);

        irq_status = 1;
        /*
         * The clock is gated on when dec/enc starts and gated off when
         * the codec is done.
         */
        if (codec_done)
                codec_done = 0;

        wake_up_interruptible(&vpu_queue);
}

/*!
 * @brief vpu interrupt handler
 */
static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
{
        struct vpu_priv *dev = dev_id;
        unsigned long reg;

        reg = READ_REG(BIT_INT_REASON);
        if (reg & 0x8)
                codec_done = 1;
        WRITE_REG(0x1, BIT_INT_CLEAR);

        queue_work(dev->workqueue, &dev->work);

        return IRQ_HANDLED;
}

/*!
 * @brief vpu jpu interrupt handler
 */
#ifdef MXC_VPU_HAS_JPU
static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
{
        struct vpu_priv *dev = dev_id;
        unsigned long reg;

        reg = READ_REG(MJPEG_PIC_STATUS_REG);
        if (reg & 0x3)
                codec_done = 1;

        queue_work(dev->workqueue, &dev->work);

        return IRQ_HANDLED;
}
#endif

/*!
 * @brief Check whether a physical address is valid to pass to the VPU.
 * Handing the VPU a wrong address (e.g. a virtual address) is known to
 * hang the system.
 *
 * @return true if the address is a valid physical memory address,
 * false if it is not.
 */
bool vpu_is_valid_phy_memory(u32 paddr)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
        if (paddr > top_address_DRAM)
                return false;
#endif

        return true;
}

/*!
 * @brief open function for vpu file operation
 *
 * @return  0 on success or negative error code on error
 */
static int vpu_open(struct inode *inode, struct file *filp)
{
        mutex_lock(&vpu_data.lock);

        if (open_count++ == 0) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
                pm_runtime_get_sync(vpu_dev);
#endif
                vpu_power_up(true);

#ifdef CONFIG_SOC_IMX6Q
                clk_prepare(vpu_clk);
                clk_enable(vpu_clk);
                if (READ_REG(BIT_CUR_PC))
                        dev_dbg(vpu_dev, "VPU was not powered off before open!\n");
                clk_disable(vpu_clk);
                clk_unprepare(vpu_clk);
#endif
        }

        filp->private_data = (void *)(&vpu_data);
        mutex_unlock(&vpu_data.lock);
        return 0;
}

/*!
 * @brief IO ctrl function for vpu file operation
 * @param cmd IO ctrl command
 * @return  0 on success or negative error code on error
 */
static long vpu_ioctl(struct file *filp, u_int cmd,
                     u_long arg)
{
        int ret = 0;

        switch (cmd) {
        case VPU_IOC_PHYMEM_ALLOC:
                {
                        struct memalloc_record *rec;

                        rec = kzalloc(sizeof(*rec), GFP_KERNEL);
                        if (!rec)
                                return -ENOMEM;

                        ret = copy_from_user(&(rec->mem),
                                             (struct vpu_mem_desc *)arg,
                                             sizeof(struct vpu_mem_desc));
                        if (ret) {
                                kfree(rec);
                                return -EFAULT;
                        }

                        dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
                                 rec->mem.size);

                        ret = vpu_alloc_dma_buffer(&(rec->mem));
                        if (ret == -1) {
                                kfree(rec);
                                dev_err(vpu_dev,
                                        "Physical memory allocation error!\n");
                                break;
                        }
                        ret = copy_to_user((void __user *)arg, &(rec->mem),
                                           sizeof(struct vpu_mem_desc));
                        if (ret) {
                                /* don't leak the DMA buffer on copy failure */
                                vpu_free_dma_buffer(&rec->mem);
                                kfree(rec);
                                ret = -EFAULT;
                                break;
                        }

                        mutex_lock(&vpu_data.lock);
                        list_add(&rec->list, &head);
                        mutex_unlock(&vpu_data.lock);

                        break;
                }
        case VPU_IOC_PHYMEM_FREE:
                {
                        struct memalloc_record *rec, *n;
                        struct vpu_mem_desc vpu_mem;

                        ret = copy_from_user(&vpu_mem,
                                             (struct vpu_mem_desc *)arg,
                                             sizeof(struct vpu_mem_desc));
                        if (ret)
                                return -EFAULT;

                        dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = 0x%x\n",
                                 vpu_mem.cpu_addr);
                        if ((void *)vpu_mem.cpu_addr != NULL)
                                vpu_free_dma_buffer(&vpu_mem);

                        mutex_lock(&vpu_data.lock);
                        list_for_each_entry_safe(rec, n, &head, list) {
                                if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
                                        /* delete from list */
                                        list_del(&rec->list);
                                        kfree(rec);
                                        break;
                                }
                        }
                        mutex_unlock(&vpu_data.lock);

                        break;
                }
        case VPU_IOC_WAIT4INT:
                {
                        u_long timeout = (u_long) arg;
                        if (!wait_event_interruptible_timeout
                            (vpu_queue, irq_status != 0,
                             msecs_to_jiffies(timeout))) {
                                dev_warn(vpu_dev, "VPU blocking: timeout.\n");
                                ret = -ETIME;
                        } else if (signal_pending(current)) {
                                dev_warn(vpu_dev, "VPU wait interrupted by signal.\n");
                                ret = -ERESTARTSYS;
                        } else
                                irq_status = 0;
                        break;
                }
        case VPU_IOC_IRAM_SETTING:
                {
                        ret = copy_to_user((void __user *)arg, &iram,
                                           sizeof(struct iram_setting));
                        if (ret)
                                ret = -EFAULT;

                        break;
                }
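        /*
         * User space gates the VPU clock on around each codec run;
         * clk_cnt_from_ioc tracks the balance so that release/suspend
         * can unwind any enables that are left behind.
         */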
        case VPU_IOC_CLKGATE_SETTING:
                {
                        u32 clkgate_en;

                        if (get_user(clkgate_en, (u32 __user *) arg))
                                return -EFAULT;

                        if (clkgate_en) {
                                clk_prepare(vpu_clk);
                                clk_enable(vpu_clk);
                                atomic_inc(&clk_cnt_from_ioc);
                        } else {
                                clk_disable(vpu_clk);
                                clk_unprepare(vpu_clk);
                                atomic_dec(&clk_cnt_from_ioc);
                        }

                        break;
                }
        case VPU_IOC_GET_SHARE_MEM:
                {
                        mutex_lock(&vpu_data.lock);
                        if (share_mem.cpu_addr != 0) {
                                ret = copy_to_user((void __user *)arg,
                                                   &share_mem,
                                                   sizeof(struct vpu_mem_desc));
                                mutex_unlock(&vpu_data.lock);
                                break;
                        } else {
                                if (copy_from_user(&share_mem,
                                                   (struct vpu_mem_desc *)arg,
                                                 sizeof(struct vpu_mem_desc))) {
                                        mutex_unlock(&vpu_data.lock);
                                        return -EFAULT;
                                }
                                if (vpu_alloc_dma_buffer(&share_mem) == -1)
                                        ret = -EFAULT;
                                else {
                                        if (copy_to_user((void __user *)arg,
                                                         &share_mem,
                                                         sizeof(struct
                                                                vpu_mem_desc)))
                                                ret = -EFAULT;
                                }
                        }
                        mutex_unlock(&vpu_data.lock);
                        break;
                }
        case VPU_IOC_REQ_VSHARE_MEM:
                {
                        mutex_lock(&vpu_data.lock);
                        if (vshare_mem.cpu_addr != 0) {
                                ret = copy_to_user((void __user *)arg,
                                                   &vshare_mem,
                                                   sizeof(struct vpu_mem_desc));
                                mutex_unlock(&vpu_data.lock);
                                break;
                        } else {
                                if (copy_from_user(&vshare_mem,
                                                   (struct vpu_mem_desc *)arg,
                                                   sizeof(struct
                                                          vpu_mem_desc))) {
                                        mutex_unlock(&vpu_data.lock);
                                        return -EFAULT;
                                }
                                /* vmalloc shared memory if not allocated */
                                if (!vshare_mem.cpu_addr)
                                        vshare_mem.cpu_addr =
                                            (unsigned long)
                                            vmalloc_user(vshare_mem.size);
                                if (copy_to_user
                                     ((void __user *)arg, &vshare_mem,
                                     sizeof(struct vpu_mem_desc)))
                                        ret = -EFAULT;
                        }
                        mutex_unlock(&vpu_data.lock);
                        break;
                }
        case VPU_IOC_GET_WORK_ADDR:
                {
                        if (bitwork_mem.cpu_addr != 0) {
                                ret =
                                    copy_to_user((void __user *)arg,
                                                 &bitwork_mem,
                                                 sizeof(struct vpu_mem_desc));
                                break;
                        } else {
                                if (copy_from_user(&bitwork_mem,
                                                   (struct vpu_mem_desc *)arg,
                                                   sizeof(struct vpu_mem_desc)))
                                        return -EFAULT;

                                if (vpu_alloc_dma_buffer(&bitwork_mem) == -1)
                                        ret = -EFAULT;
                                else if (copy_to_user((void __user *)arg,
                                                      &bitwork_mem,
                                                      sizeof(struct
                                                             vpu_mem_desc)))
                                        ret = -EFAULT;
                        }
                        break;
                }
        /*
         * The following two ioctls are used when the user allocates the
         * working buffer and registers it with the vpu driver.
         */
        case VPU_IOC_QUERY_BITWORK_MEM:
                {
                        if (copy_to_user((void __user *)arg,
                                         &bitwork_mem,
                                         sizeof(struct vpu_mem_desc)))
                                ret = -EFAULT;
                        break;
                }
        case VPU_IOC_SET_BITWORK_MEM:
                {
                        if (copy_from_user(&bitwork_mem,
                                           (struct vpu_mem_desc *)arg,
                                           sizeof(struct vpu_mem_desc)))
                                ret = -EFAULT;
                        break;
                }
        case VPU_IOC_SYS_SW_RESET:
                {
                        vpu_reset();
                        break;
                }
        case VPU_IOC_REG_DUMP:
                break;
        case VPU_IOC_PHYMEM_DUMP:
                break;
        case VPU_IOC_PHYMEM_CHECK:
        {
                struct vpu_mem_desc check_memory;
                ret = copy_from_user(&check_memory,
                                     (void __user *)arg,
                                     sizeof(struct vpu_mem_desc));
                if (ret != 0) {
                        dev_err(vpu_dev, "copy from user failed: %d\n", ret);
                        ret = -EFAULT;
                        break;
                }
                ret = vpu_is_valid_phy_memory((u32)check_memory.phy_addr);

                dev_dbg(vpu_dev, "vpu: memory phy:0x%x %s phy memory\n",
                       check_memory.phy_addr, (ret ? "is" : "isn't"));
                /* borrow .size to pass back the result. */
                check_memory.size = ret;
                ret = copy_to_user((void __user *)arg, &check_memory,
                                   sizeof(struct vpu_mem_desc));
                if (ret) {
                        ret = -EFAULT;
                        break;
                }
                break;
        }
        case VPU_IOC_LOCK_DEV:
                {
                        u32 lock_en;

                        if (get_user(lock_en, (u32 __user *) arg))
                                return -EFAULT;

                        if (lock_en)
                                mutex_lock(&vpu_data.lock);
                        else
                                mutex_unlock(&vpu_data.lock);

                        break;
                }
        default:
                {
                        dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
                        ret = -EINVAL;
                        break;
                }
        }
        return ret;
}

/*!
 * @brief Release function for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_release(struct inode *inode, struct file *filp)
{
        int i;
        unsigned long timeout;

        mutex_lock(&vpu_data.lock);

        if (open_count > 0 && !(--open_count)) {
                /* Wait for the VPU to go idle */
                clk_prepare(vpu_clk);
                clk_enable(vpu_clk);
                if (READ_REG(BIT_CUR_PC)) {
                        timeout = jiffies + HZ;
                        while (READ_REG(BIT_BUSY_FLAG)) {
                                msleep(1);
                                if (time_after(jiffies, timeout)) {
                                        dev_warn(vpu_dev, "VPU timeout during release\n");
                                        break;
                                }
                        }
                        clk_disable(vpu_clk);
                        clk_unprepare(vpu_clk);

                        /* Clean up interrupt */
                        cancel_work_sync(&vpu_data.work);
                        flush_workqueue(vpu_data.workqueue);
                        irq_status = 0;

                        clk_prepare(vpu_clk);
                        clk_enable(vpu_clk);
                        if (READ_REG(BIT_BUSY_FLAG)) {
                                if (cpu_is_mx51() || cpu_is_mx53()) {
                                        dev_err(vpu_dev,
                                                "fatal error: can't gate/power off when VPU is busy\n");
                                        clk_disable(vpu_clk);
                                        clk_unprepare(vpu_clk);
                                        mutex_unlock(&vpu_data.lock);
                                        return -EFAULT;
                                }

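                                /*
                                 * On i.MX6, ask the codec firmware to go
                                 * idle: write a (firmware-defined) command
                                 * to register 0x10F0 and poll 0x10F4 for
                                 * the 0x77 ack before gating clock/power.
                                 */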
#ifdef CONFIG_SOC_IMX6Q
                                if (cpu_is_mx6dl() || cpu_is_mx6q()) {
                                        WRITE_REG(0x11, 0x10F0);
                                        timeout = jiffies + HZ;
                                        while (READ_REG(0x10F4) != 0x77) {
                                                msleep(1);
                                                if (time_after(jiffies, timeout))
                                                        break;
                                        }

                                        if (READ_REG(0x10F4) != 0x77) {
                                                dev_err(vpu_dev,
                                                        "fatal error: can't gate/power off when VPU is busy\n");
                                                WRITE_REG(0x0, 0x10F0);
                                                clk_disable(vpu_clk);
                                                clk_unprepare(vpu_clk);
                                                mutex_unlock(&vpu_data.lock);
                                                return -EFAULT;
                                        } else
                                                vpu_reset();
                                }
#endif
                        }
                }
                clk_disable(vpu_clk);
                clk_unprepare(vpu_clk);

                vpu_free_buffers();

                /* Free shared memory when vpu device is idle */
                vpu_free_dma_buffer(&share_mem);
                share_mem.cpu_addr = 0;
                vfree((void *)vshare_mem.cpu_addr);
                vshare_mem.cpu_addr = 0;

                vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
                for (i = 0; i < vpu_clk_usercount; i++) {
                        clk_disable(vpu_clk);
                        clk_unprepare(vpu_clk);
                        atomic_dec(&clk_cnt_from_ioc);
                }

                vpu_power_up(false);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
                pm_runtime_put_sync_suspend(vpu_dev);
#endif
        }
        mutex_unlock(&vpu_data.lock);

        return 0;
}

/*!
 * @brief fasync function for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_fasync(int fd, struct file *filp, int mode)
{
        struct vpu_priv *dev = (struct vpu_priv *)filp->private_data;
        return fasync_helper(fd, filp, mode, &dev->async_queue);
}

/*!
 * @brief memory map function of hardware registers for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
{
        unsigned long pfn;

        vm->vm_flags |= VM_IO | VM_RESERVED;
        /*
         * The vpu registers were mapped with ioremap() at probe time, which
         * sets L_PTE_XN to 1, and the same physical address must always be
         * mapped with the same memory type, so set L_PTE_XN to 1 here too.
         * Otherwise the video codec may misbehave.
         */
        vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
        pfn = phy_vpu_base_addr >> PAGE_SHIFT;
        dev_dbg(vpu_dev, "size=0x%x, page no.=0x%x\n",
                 (int)(vm->vm_end - vm->vm_start), (int)pfn);
        return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end - vm->vm_start,
                               vm->vm_page_prot) ? -EAGAIN : 0;
}

/*!
 * @brief memory map function of memory for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
{
        int request_size;
        request_size = vm->vm_end - vm->vm_start;

        dev_dbg(vpu_dev, "start=0x%x, pgoff=0x%x, size=0x%x\n",
                 (unsigned int)(vm->vm_start), (unsigned int)(vm->vm_pgoff),
                 request_size);

        vm->vm_flags |= VM_IO | VM_RESERVED;
        vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);

        return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
                               request_size, vm->vm_page_prot) ? -EAGAIN : 0;
}

/*!
 * @brief memory map function of vmalloced share memory
 * @return  0 on success or negative error code on error
 */
static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
{
        int ret;

        ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
        vm->vm_flags |= VM_IO;

        return ret;
}

/*!
 * @brief memory map interface for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
{
        unsigned long offset;

        offset = vshare_mem.cpu_addr >> PAGE_SHIFT;

        if (vm->vm_pgoff && (vm->vm_pgoff == offset))
                return vpu_map_vshare_mem(fp, vm);
        else if (vm->vm_pgoff)
                return vpu_map_dma_mem(fp, vm);
        else
                return vpu_map_hwregs(fp, vm);
}

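/*
 * User-space mapping convention implied by the dispatch above (a sketch;
 * "fd" is an open descriptor on /dev/mxc_vpu and "mem"/"vshare" are
 * vpu_mem_desc structures filled in by the ioctls above):
 *
 *   regs = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *   dma  = mmap(NULL, mem.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *               fd, mem.phy_addr);       // any other nonzero offset
 *   vsh  = mmap(NULL, vshare.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *               fd, vshare.cpu_addr);    // matches the vshare offset
 */
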
const struct file_operations vpu_fops = {
        .owner = THIS_MODULE,
        .open = vpu_open,
        .unlocked_ioctl = vpu_ioctl,
        .release = vpu_release,
        .fasync = vpu_fasync,
        .mmap = vpu_mmap,
};

/*!
 * This function is called by the driver framework to initialize the vpu device.
 * @param   dev The device structure for the vpu passed in by the framework.
 * @return   0 on success or negative error code on error
 */
static int vpu_dev_probe(struct platform_device *pdev)
{
        int err = 0;
        struct device *temp_class;
        struct resource *res;
        unsigned long addr = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        struct device_node *np = pdev->dev.of_node;
        u32 iramsize;

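        /*
         * Reserve the optional on-chip IRAM region for the VPU; the size
         * comes from the "iramsize" device-tree property.
         */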
        err = of_property_read_u32(np, "iramsize", &iramsize);
        if (!err && iramsize)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
        {
                iram_pool = of_get_named_gen_pool(np, "iram", 0);
                if (!iram_pool) {
                        dev_err(&pdev->dev, "iram pool not available\n");
                        return -ENOMEM;
                }

                iram_base = gen_pool_alloc(iram_pool, iramsize);
                if (!iram_base) {
                        dev_err(&pdev->dev, "unable to alloc iram\n");
                        return -ENOMEM;
                }

                addr = gen_pool_virt_to_phys(iram_pool, iram_base);
        }
#else
                iram_alloc(iramsize, &addr);
#endif
        if (addr == 0)
                iram.start = iram.end = 0;
        else {
                iram.start = addr;
                iram.end = addr + iramsize - 1;
        }
#else

        vpu_plat = pdev->dev.platform_data;

        if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
                iram_alloc(vpu_plat->iram_size, &addr);
        if (addr == 0)
                iram.start = iram.end = 0;
        else {
                iram.start = addr;
                iram.end = addr + vpu_plat->iram_size - 1;
        }
#endif

        vpu_dev = &pdev->dev;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
        if (!res) {
                dev_err(vpu_dev, "vpu: unable to get vpu base addr\n");
                return -ENODEV;
        }
        phy_vpu_base_addr = res->start;
        vpu_base = ioremap(res->start, resource_size(res));

        vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
        if (vpu_major < 0) {
                dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
                err = -EBUSY;
                goto error;
        }

        vpu_class = class_create(THIS_MODULE, "mxc_vpu");
        if (IS_ERR(vpu_class)) {
                err = PTR_ERR(vpu_class);
                goto err_out_chrdev;
        }

        temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
                                   NULL, "mxc_vpu");
        if (IS_ERR(temp_class)) {
                err = PTR_ERR(temp_class);
                goto err_out_class;
        }

        vpu_clk = clk_get(&pdev->dev, "vpu_clk");
        if (IS_ERR(vpu_clk)) {
                err = -ENOENT;
                goto err_out_class;
        }

        vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
        if (vpu_ipi_irq < 0) {
                dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
                err = -ENXIO;
                goto err_out_class;
        }
        err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
                          (void *)(&vpu_data));
        if (err)
                goto err_out_class;
        if (vpu_power_get(true)) {
                if (!(cpu_is_mx51() || cpu_is_mx53())) {
                        dev_err(vpu_dev, "failed to get vpu power\n");
                        goto err_out_class;
                } else {
                        /*
                         * regulator_get will return an error on MX5x;
                         * just ignore it everywhere.
                         */
                        dev_warn(vpu_dev, "failed to get vpu power\n");
                }
        }

#ifdef MXC_VPU_HAS_JPU
        vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
        if (vpu_jpu_irq < 0) {
                dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
                err = -ENXIO;
                free_irq(vpu_ipi_irq, &vpu_data);
                goto err_out_class;
        }
        err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
                          "VPU_JPG_IRQ", (void *)(&vpu_data));
        if (err) {
                free_irq(vpu_ipi_irq, &vpu_data);
                goto err_out_class;
        }
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        pm_runtime_enable(&pdev->dev);
#endif

        vpu_data.workqueue = create_workqueue("vpu_wq");
        INIT_WORK(&vpu_data.work, vpu_worker_callback);
        mutex_init(&vpu_data.lock);
        dev_info(vpu_dev, "VPU initialized\n");
        goto out;

err_out_class:
        device_destroy(vpu_class, MKDEV(vpu_major, 0));
        class_destroy(vpu_class);
err_out_chrdev:
        unregister_chrdev(vpu_major, "mxc_vpu");
error:
        iounmap(vpu_base);
out:
        return err;
}

static int vpu_dev_remove(struct platform_device *pdev)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        pm_runtime_disable(&pdev->dev);
#endif
        free_irq(vpu_ipi_irq, &vpu_data);
#ifdef MXC_VPU_HAS_JPU
        free_irq(vpu_jpu_irq, &vpu_data);
#endif
        cancel_work_sync(&vpu_data.work);
        flush_workqueue(vpu_data.workqueue);
        destroy_workqueue(vpu_data.workqueue);

        iounmap(vpu_base);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        if (iram.start)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
                gen_pool_free(iram_pool, iram_base, iram.end - iram.start + 1);
#else
                iram_free(iram.start, iram.end - iram.start + 1);
#endif
#else
        if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
                iram_free(iram.start, vpu_plat->iram_size);
#endif

        vpu_power_get(false);
        return 0;
}

#ifdef CONFIG_PM
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
static int vpu_suspend(struct device *dev)
#else
static int vpu_suspend(struct platform_device *pdev, pm_message_t state)
#endif
{
        int i;
        unsigned long timeout;

        mutex_lock(&vpu_data.lock);
        if (open_count == 0) {
                /* VPU is released (all instances are freed),
                 * clock is already off, context is no longer needed,
                 * power is already off on MX6,
                 * gate power on MX51 */
                if (cpu_is_mx51()) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                        if (vpu_plat->pg)
                                vpu_plat->pg(1);
#endif
                }
        } else {
                /* Wait for the VPU to go idle; give up if it has not become
                   idle after about 1 sec */
                timeout = jiffies + HZ;
                clk_prepare(vpu_clk);
                clk_enable(vpu_clk);
                while (READ_REG(BIT_BUSY_FLAG)) {
                        msleep(1);
                        if (time_after(jiffies, timeout)) {
                                clk_disable(vpu_clk);
                                clk_unprepare(vpu_clk);
                                mutex_unlock(&vpu_data.lock);
                                return -EAGAIN;
                        }
                }
                clk_disable(vpu_clk);
                clk_unprepare(vpu_clk);

                /* Make sure the clock is disabled before suspend */
                vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
                for (i = 0; i < vpu_clk_usercount; i++) {
                        clk_disable(vpu_clk);
                        clk_unprepare(vpu_clk);
                }

                if (cpu_is_mx53()) {
                        mutex_unlock(&vpu_data.lock);
                        return 0;
                }

                if (bitwork_mem.cpu_addr != 0) {
                        clk_prepare(vpu_clk);
                        clk_enable(vpu_clk);
                        /* Save 64 registers from BIT_CODE_BUF_ADDR */
                        for (i = 0; i < 64; i++)
                                regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
                        pc_before_suspend = READ_REG(BIT_CUR_PC);
                        clk_disable(vpu_clk);
                        clk_unprepare(vpu_clk);
                }

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                if (vpu_plat->pg)
                        vpu_plat->pg(1);
#endif

                /* If the VPU was working before suspend, disable the
                 * regulator to keep its usecount balanced. */
                vpu_power_up(false);
        }

        mutex_unlock(&vpu_data.lock);
        return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
static int vpu_resume(struct device *dev)
#else
static int vpu_resume(struct platform_device *pdev)
#endif
{
        int i;

        mutex_lock(&vpu_data.lock);
        if (open_count == 0) {
                /* VPU is released (all instances are freed),
                 * clock should be kept off, context is no longer needed,
                 * power should be kept off on MX6,
                 * disable power gating on MX51 */
                if (cpu_is_mx51()) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                        if (vpu_plat->pg)
                                vpu_plat->pg(0);
#endif
                }
        } else {
                if (cpu_is_mx53())
                        goto recover_clk;

                /* If the VPU was working before suspend, enable the
                 * regulator to keep its usecount balanced. */
                vpu_power_up(true);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                if (vpu_plat->pg)
                        vpu_plat->pg(0);
#endif

                if (bitwork_mem.cpu_addr != 0) {
                        u32 *p = (u32 *) bitwork_mem.cpu_addr;
                        u32 data, pc;
                        u16 data_hi;
                        u16 data_lo;

                        clk_prepare(vpu_clk);
                        clk_enable(vpu_clk);

                        pc = READ_REG(BIT_CUR_PC);
                        if (pc) {
                                dev_warn(vpu_dev, "Not powered off after suspend (PC=0x%x)\n", pc);
                                clk_disable(vpu_clk);
                                clk_unprepare(vpu_clk);
                                goto recover_clk;
                        }

                        /* Restore registers */
                        for (i = 0; i < 64; i++)
                                WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));

                        WRITE_REG(0x0, BIT_RESET_CTRL);
                        WRITE_REG(0x0, BIT_CODE_RUN);
                        /* The MX6 RTL has a bug: MBC_SET_SUBBLK_EN is not
                         * initialized on reset, so clear it by hand. */
#ifdef CONFIG_SOC_IMX6Q
                        WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
#endif

                        /*
                         * Re-load the boot code from the code buffer in
                         * external RAM. Thankfully we only need 4096 bytes,
                         * the same for all platforms.
                         */
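                        /*
                         * Each 32-bit word of the code buffer holds two
                         * 16-bit halfwords, pushed to the VPU as
                         * (target index << 16) | halfword via BIT_CODE_DOWN.
                         */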
                        for (i = 0; i < 2048; i += 4) {
                                data = p[(i / 2) + 1];
                                data_hi = (data >> 16) & 0xFFFF;
                                data_lo = data & 0xFFFF;
                                WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
                                WRITE_REG(((i + 1) << 16) | data_lo,
                                                BIT_CODE_DOWN);

                                data = p[i / 2];
                                data_hi = (data >> 16) & 0xFFFF;
                                data_lo = data & 0xFFFF;
                                WRITE_REG(((i + 2) << 16) | data_hi,
                                                BIT_CODE_DOWN);
                                WRITE_REG(((i + 3) << 16) | data_lo,
                                                BIT_CODE_DOWN);
                        }

                        if (pc_before_suspend) {
                                WRITE_REG(0x1, BIT_BUSY_FLAG);
                                WRITE_REG(0x1, BIT_CODE_RUN);
                                while (READ_REG(BIT_BUSY_FLAG))
                                        ;
                        } else {
                                dev_warn(vpu_dev, "PC=0 before suspend\n");
                        }
                        clk_disable(vpu_clk);
                        clk_unprepare(vpu_clk);
                }

recover_clk:
                /* Recover vpu clock */
                for (i = 0; i < vpu_clk_usercount; i++) {
                        clk_prepare(vpu_clk);
                        clk_enable(vpu_clk);
                }
        }

        mutex_unlock(&vpu_data.lock);
        return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
static int vpu_runtime_suspend(struct device *dev)
{
        release_bus_freq(BUS_FREQ_HIGH);
        return 0;
}

static int vpu_runtime_resume(struct device *dev)
{
        request_bus_freq(BUS_FREQ_HIGH);
        return 0;
}

static const struct dev_pm_ops vpu_pm_ops = {
        SET_RUNTIME_PM_OPS(vpu_runtime_suspend, vpu_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(vpu_suspend, vpu_resume)
};
#endif

#else
#define vpu_suspend     NULL
#define vpu_resume      NULL
#endif                          /* !CONFIG_PM */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
static const struct of_device_id vpu_of_match[] = {
        { .compatible = "fsl,imx6-vpu", },
        {/* sentinel */}
};
MODULE_DEVICE_TABLE(of, vpu_of_match);
#endif

/*! Driver definition
 *
 */
static struct platform_driver mxcvpu_driver = {
        .driver = {
                   .name = "mxc_vpu",
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
                   .of_match_table = vpu_of_match,
#ifdef CONFIG_PM
                   .pm = &vpu_pm_ops,
#endif
#endif
                   },
        .probe = vpu_dev_probe,
        .remove = vpu_dev_remove,
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
        .suspend = vpu_suspend,
        .resume = vpu_resume,
#endif
};

static int __init vpu_init(void)
{
        int ret = platform_driver_register(&mxcvpu_driver);

        init_waitqueue_head(&vpu_queue);

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
        memblock_analyze();
        top_address_DRAM = memblock_end_of_DRAM_with_reserved();
#endif

        return ret;
}

static void __exit vpu_exit(void)
{
        if (vpu_major > 0) {
                device_destroy(vpu_class, MKDEV(vpu_major, 0));
                class_destroy(vpu_class);
                unregister_chrdev(vpu_major, "mxc_vpu");
                vpu_major = 0;
        }

        vpu_free_dma_buffer(&bitwork_mem);
        vpu_free_dma_buffer(&pic_para_mem);
        vpu_free_dma_buffer(&user_data_mem);

        /* reset VPU state */
        vpu_power_up(true);
        clk_prepare(vpu_clk);
        clk_enable(vpu_clk);
        vpu_reset();
        clk_disable(vpu_clk);
        clk_unprepare(vpu_clk);
        vpu_power_up(false);

        clk_put(vpu_clk);

        platform_driver_unregister(&mxcvpu_driver);
}

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
MODULE_LICENSE("GPL");

module_init(vpu_init);
module_exit(vpu_exit);