]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/mxc/vpu/mxc_vpu.c
86cdf4caf956e991c29a6bb6f1ed2d54d4fd5d9a
[karo-tx-linux.git] / drivers / mxc / vpu / mxc_vpu.c
1 /*
2  * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
3  */
4
5 /*
6  * The code contained herein is licensed under the GNU General Public
7  * License. You may obtain a copy of the GNU General Public License
8  * Version 2 or later at the following locations:
9  *
10  * http://www.opensource.org/licenses/gpl-license.html
11  * http://www.gnu.org/copyleft/gpl.html
12  */
13
14 /*!
15  * @file mxc_vpu.c
16  *
17  * @brief VPU system initialization and file operation implementation
18  *
19  * @ingroup VPU
20  */
21
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/stat.h>
27 #include <linux/platform_device.h>
28 #include <linux/kdev_t.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/wait.h>
31 #include <linux/list.h>
32 #include <linux/clk.h>
33 #include <linux/delay.h>
34 #include <linux/fsl_devices.h>
35 #include <linux/uaccess.h>
36 #include <linux/io.h>
37 #include <linux/slab.h>
38 #include <linux/workqueue.h>
39 #include <linux/sched.h>
40 #include <linux/vmalloc.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/page-flags.h>
43 #include <linux/mm_types.h>
44 #include <linux/types.h>
45 #include <linux/memblock.h>
46 #include <linux/memory.h>
47 #include <linux/version.h>
48 #include <linux/module.h>
49 #include <linux/pm_runtime.h>
50 #include <linux/sizes.h>
51 #include <linux/genalloc.h>
52 #include <linux/of.h>
53 #include <linux/of_device.h>
54 #include <linux/reset.h>
55 #include <linux/clk.h>
56 #include <linux/mxc_vpu.h>
57
/* Define one new pgprot which combined uncached and XN(never executable) */
#define pgprot_noncachedxn(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)

/* Per-device driver state, shared by every open file handle. */
struct vpu_priv {
        struct fasync_struct *async_queue;      /* SIGIO subscribers (fasync) */
        struct work_struct work;                /* bottom half of the IRQ handlers */
        struct workqueue_struct *workqueue;     /* queue the work item runs on */
        struct mutex lock;                      /* serializes open/release/ioctl state */
        const struct mxc_vpu_soc_data *soc_data;        /* per-SoC quirk flags */
        int clk_enabled;                        /* clock refcount; negative means a bug */
};

/* Per-open-file state: this fd's share of the global clock refcount. */
struct vpu_user_data {
        struct vpu_priv *vpu_data;
        int clk_enable_cnt;     /* refs taken via VPU_IOC_CLKGATE_SETTING */
};

/* To track the allocated memory buffer */
struct memalloc_record {
        struct list_head list;
        struct vpu_mem_desc mem;
};

/* Physical address window of the on-chip IRAM reported to userspace. */
struct iram_setting {
        u32 start;
        u32 end;
};

/* Compile-time per-SoC feature/quirk flags, selected via the OF match table. */
struct mxc_vpu_soc_data {
        unsigned vpu_pwr_mgmnt:1,
                regulator_required:1,
                quirk_subblk_en:1,
                is_mx51:1,
                is_mx53:1,
                is_mx6dl:1,
                is_mx6q:1,
                has_jpu:1;
};

static struct gen_pool *iram_pool;
static u32 iram_base;

/* All DMA buffers handed out via VPU_IOC_PHYMEM_ALLOC. */
static LIST_HEAD(mem_list);

static int vpu_major;
static struct class *vpu_class;
static struct vpu_priv *vpu_data;
static u8 open_count;           /* number of open fds; guarded by vpu_data->lock */
static struct clk *vpu_clk;
static struct vpu_mem_desc bitwork_mem;
static struct vpu_mem_desc pic_para_mem;
static struct vpu_mem_desc user_data_mem;
static struct vpu_mem_desc share_mem;           /* DMA-coherent shared area */
static struct vpu_mem_desc vshare_mem;          /* vmalloc'ed, mmap'able shared area */

static void __iomem *vpu_base;          /* ioremapped VPU register window */
static int vpu_ipi_irq;
static u32 phy_vpu_base_addr;           /* physical base, exported via mmap */

static struct device *vpu_dev;

/* IRAM setting */
static struct iram_setting iram;

/* implement the blocking ioctl */
static int irq_status;
static int codec_done;
static wait_queue_head_t vpu_queue;

static int vpu_jpu_irq;

#ifdef CONFIG_PM_SLEEP
static unsigned int regBk[64];          /* register save area across suspend */
static unsigned int pc_before_suspend;
#endif
static struct regulator *vpu_regulator;

/* Relaxed MMIO accessors into the VPU register window. */
#define READ_REG(x)             readl_relaxed(vpu_base + (x))
#define WRITE_REG(val, x)       writel_relaxed(val, vpu_base + (x))
139 static int vpu_clk_enable(struct vpu_priv *vpu_data)
140 {
141         if (WARN_ON(vpu_data->clk_enabled < 0))
142                 return -EINVAL;
143
144         if (vpu_data->clk_enabled++ == 0)
145                 return clk_prepare_enable(vpu_clk);
146
147         return 0;
148 }
149
150 static int vpu_clk_disable(struct vpu_priv *vpu_data)
151 {
152         if (WARN_ON(vpu_data->clk_enabled <= 0))
153                 return -EINVAL;
154
155         if (--vpu_data->clk_enabled == 0)
156                 clk_disable_unprepare(vpu_clk);
157         return 0;
158 }
159
/* Pulse the VPU reset line via the SoC reset controller (see linux/reset.h). */
static inline int vpu_reset(void)
{
        return device_reset(vpu_dev);
}
164
165 static void vpu_power_up(void)
166 {
167         int ret;
168
169         if (IS_ERR(vpu_regulator))
170                 return;
171
172         ret = regulator_enable(vpu_regulator);
173         if (ret)
174                 dev_err(vpu_dev, "failed to power up vpu: %d\n", ret);
175 }
176
177 static void vpu_power_down(void)
178 {
179         int ret;
180
181         if (IS_ERR(vpu_regulator))
182                 return;
183
184         ret = regulator_disable(vpu_regulator);
185         if (ret)
186                 dev_err(vpu_dev, "failed to power down vpu: %d\n", ret);
187 }
188
189 /*!
190  * Private function to alloc dma buffer
191  * @return status  0 success.
192  */
193 static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
194 {
195         mem->cpu_addr = dma_alloc_coherent(vpu_dev, PAGE_ALIGN(mem->size),
196                                         &mem->phy_addr,
197                                         GFP_DMA | GFP_KERNEL);
198         dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = %p\n", mem->cpu_addr);
199         if (mem->cpu_addr == NULL) {
200                 dev_err(vpu_dev, "Physical memory allocation error!\n");
201                 return -ENOMEM;
202         }
203         return 0;
204 }
205
206 /*!
207  * Private function to free dma buffer
208  */
209 static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
210 {
211         if (mem->cpu_addr != NULL)
212                 dma_free_coherent(vpu_dev, PAGE_ALIGN(mem->size),
213                                 mem->cpu_addr, mem->phy_addr);
214 }
215
216 /*!
217  * Private function to free buffers
218  * @return status  0 success.
219  */
220 static int vpu_free_buffers(void)
221 {
222         struct memalloc_record *rec, *n;
223         struct vpu_mem_desc mem;
224
225         list_for_each_entry_safe(rec, n, &mem_list, list) {
226                 mem = rec->mem;
227                 if (mem.cpu_addr != 0) {
228                         vpu_free_dma_buffer(&mem);
229                         dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
230                         /* delete from list */
231                         list_del(&rec->list);
232                         kfree(rec);
233                 }
234         }
235
236         return 0;
237 }
238
239 static inline void vpu_worker_callback(struct work_struct *w)
240 {
241         struct vpu_priv *dev = container_of(w, struct vpu_priv, work);
242
243         if (dev->async_queue)
244                 kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
245
246         irq_status = 1;
247         /*
248          * Clock is gated on when dec/enc started, gate it off when
249          * codec is done.
250          */
251         if (codec_done)
252                 codec_done = 0;
253
254         wake_up_interruptible(&vpu_queue);
255 }
256
257 /*!
258  * @brief vpu interrupt handler
259  */
260 static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
261 {
262         struct vpu_priv *dev = dev_id;
263         unsigned long reg;
264
265         reg = READ_REG(BIT_INT_REASON);
266         if (reg & 0x8)
267                 codec_done = 1;
268         WRITE_REG(0x1, BIT_INT_CLEAR);
269
270         queue_work(dev->workqueue, &dev->work);
271
272         return IRQ_HANDLED;
273 }
274
275 /*!
276  * @brief vpu jpu interrupt handler
277  */
278 static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
279 {
280         struct vpu_priv *dev = dev_id;
281         unsigned long reg;
282
283         reg = READ_REG(MJPEG_PIC_STATUS_REG);
284         if (reg & 0x3)
285                 codec_done = 1;
286
287         queue_work(dev->workqueue, &dev->work);
288
289         return IRQ_HANDLED;
290 }
291
292 /*!
293  * @brief open function for vpu file operation
294  *
295  * @return  0 on success or negative error code on error
296  */
297 static int vpu_open(struct inode *inode, struct file *filp)
298 {
299         struct vpu_user_data *user_data = devm_kzalloc(vpu_dev,
300                                                 sizeof(*user_data),
301                                                 GFP_KERNEL);
302         if (user_data == NULL)
303                 return -ENOMEM;
304
305         user_data->vpu_data = vpu_data;
306
307         mutex_lock(&vpu_data->lock);
308
309         if (open_count++ == 0) {
310                 pm_runtime_get_sync(vpu_dev);
311                 vpu_power_up();
312         }
313
314         filp->private_data = user_data;
315         mutex_unlock(&vpu_data->lock);
316         return 0;
317 }
318
319 /*!
320  * @brief IO ctrl function for vpu file operation
321  * @param cmd IO ctrl command
322  * @return  0 on success or negative error code on error
323  */
324 static long vpu_ioctl(struct file *filp, u_int cmd,
325                      u_long arg)
326 {
327         int ret;
328         struct vpu_user_data *user_data = filp->private_data;
329         struct vpu_priv *vpu_data = user_data->vpu_data;
330
331         switch (cmd) {
332         case VPU_IOC_PHYMEM_ALLOC:
333         {
334                 struct memalloc_record *rec;
335
336                 rec = kzalloc(sizeof(*rec), GFP_KERNEL);
337                 if (!rec)
338                         return -ENOMEM;
339
340                 if (copy_from_user(&rec->mem,
341                                         (struct vpu_mem_desc *)arg,
342                                         sizeof(struct vpu_mem_desc))) {
343                         kfree(rec);
344                         return -EFAULT;
345                 }
346
347                 dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
348                         rec->mem.size);
349
350                 ret = vpu_alloc_dma_buffer(&rec->mem);
351                 if (ret) {
352                         kfree(rec);
353                         return ret;
354                 }
355                 if (copy_to_user((void __user *)arg, &rec->mem,
356                                         sizeof(struct vpu_mem_desc))) {
357                         kfree(rec);
358                         return -EFAULT;
359                 }
360
361                 mutex_lock(&vpu_data->lock);
362                 list_add(&rec->list, &mem_list);
363                 mutex_unlock(&vpu_data->lock);
364
365                 break;
366         }
367         case VPU_IOC_PHYMEM_FREE:
368         {
369                 struct memalloc_record *rec, *n;
370                 struct vpu_mem_desc vpu_mem;
371
372                 if (copy_from_user(&vpu_mem,
373                                         (struct vpu_mem_desc *)arg,
374                                         sizeof(struct vpu_mem_desc)))
375                         return -EFAULT;
376
377                 dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = %p\n",
378                         vpu_mem.cpu_addr);
379                 if (vpu_mem.cpu_addr != NULL)
380                         vpu_free_dma_buffer(&vpu_mem);
381
382                 mutex_lock(&vpu_data->lock);
383                 list_for_each_entry_safe(rec, n, &mem_list, list) {
384                         if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
385                                 list_del(&rec->list);
386                                 break;
387                         }
388                 }
389                 kfree(rec);
390                 mutex_unlock(&vpu_data->lock);
391
392                 ret = 0;
393                 break;
394         }
395         case VPU_IOC_WAIT4INT:
396         {
397                 u_long timeout = arg;
398
399                 ret = wait_event_interruptible_timeout(vpu_queue,
400                                                 irq_status != 0,
401                                                 msecs_to_jiffies(timeout));
402                 if (ret == 0) {
403                         dev_warn(vpu_dev, "VPU blocking: timeout.\n");
404                         ret = -ETIMEDOUT;
405                 } else if (signal_pending(current)) {
406                         dev_warn(vpu_dev, "VPU interrupt received.\n");
407                         ret = -ERESTARTSYS;
408                 } else {
409                         ret = irq_status = 0;
410                 }
411                 break;
412         }
413         case VPU_IOC_IRAM_SETTING:
414                 ret = copy_to_user((void __user *)arg, &iram,
415                                 sizeof(struct iram_setting));
416                 if (ret)
417                         ret = -EFAULT;
418
419                 break;
420         case VPU_IOC_CLKGATE_SETTING:
421         {
422                 u32 clkgate_en;
423
424                 if (get_user(clkgate_en, (u32 __user *)arg))
425                         return -EFAULT;
426
427                 mutex_lock(&vpu_data->lock);
428                 if (clkgate_en) {
429                         ret = vpu_clk_enable(vpu_data);
430                         if (ret == 0)
431                                 user_data->clk_enable_cnt++;
432                 } else {
433                         if (user_data->clk_enable_cnt == 0) {
434                                 ret = -EINVAL;
435                         } else {
436                                 if (--user_data->clk_enable_cnt == 0)
437                                         vpu_clk_disable(vpu_data);
438                                 ret = 0;
439                         }
440                 }
441                 mutex_unlock(&vpu_data->lock);
442                 break;
443         }
444         case VPU_IOC_GET_SHARE_MEM:
445                 mutex_lock(&vpu_data->lock);
446                 if (share_mem.cpu_addr == NULL) {
447                         if (copy_from_user(&share_mem,
448                                                 (struct vpu_mem_desc *)arg,
449                                                 sizeof(struct vpu_mem_desc))) {
450                                 mutex_unlock(&vpu_data->lock);
451                                 return -EFAULT;
452                         }
453                         ret = vpu_alloc_dma_buffer(&share_mem);
454                         if (ret) {
455                                 mutex_unlock(&vpu_data->lock);
456                                 return ret;
457                         }
458                 }
459                 if (copy_to_user((void __user *)arg,
460                                         &share_mem,
461                                         sizeof(struct vpu_mem_desc)))
462                         ret = -EFAULT;
463                 else
464                         ret = 0;
465                 mutex_unlock(&vpu_data->lock);
466                 break;
467         case VPU_IOC_REQ_VSHARE_MEM:
468                 mutex_lock(&vpu_data->lock);
469                 if (vshare_mem.cpu_addr == NULL) {
470                         if (copy_from_user(&vshare_mem,
471                                                 (struct vpu_mem_desc *)arg,
472                                                 sizeof(struct
473                                                         vpu_mem_desc))) {
474                                 mutex_unlock(&vpu_data->lock);
475                                 return -EFAULT;
476                         }
477                         vshare_mem.cpu_addr = vmalloc_user(vshare_mem.size);
478                         if (vshare_mem.cpu_addr == NULL) {
479                                 mutex_unlock(&vpu_data->lock);
480                                 return -ENOMEM;
481                         }
482                 }
483                 if (copy_to_user((void __user *)arg, &vshare_mem,
484                                         sizeof(struct vpu_mem_desc)))
485                         ret = -EFAULT;
486                 else
487                         ret = 0;
488                 mutex_unlock(&vpu_data->lock);
489                 break;
490         case VPU_IOC_GET_WORK_ADDR:
491                 if (bitwork_mem.cpu_addr == 0) {
492                         if (copy_from_user(&bitwork_mem,
493                                                 (struct vpu_mem_desc *)arg,
494                                                 sizeof(struct vpu_mem_desc)))
495                                 return -EFAULT;
496
497                         ret = vpu_alloc_dma_buffer(&bitwork_mem);
498                         if (ret)
499                                 return ret;
500                 }
501                 if (copy_to_user((void __user *)arg,
502                                         &bitwork_mem,
503                                         sizeof(struct
504                                                 vpu_mem_desc)))
505                         ret = -EFAULT;
506                 else
507                         ret = 0;
508                 break;
509         /*
510          * The following two ioctls are used when user allocates a working buffer
511          * and registers it to vpu driver.
512          */
513         case VPU_IOC_QUERY_BITWORK_MEM:
514                 if (copy_to_user((void __user *)arg,
515                                         &bitwork_mem,
516                                         sizeof(struct vpu_mem_desc)))
517                         ret = -EFAULT;
518                 else
519                         ret = 0;
520                 break;
521         case VPU_IOC_SET_BITWORK_MEM:
522                 if (copy_from_user(&bitwork_mem,
523                                         (struct vpu_mem_desc *)arg,
524                                         sizeof(struct vpu_mem_desc)))
525                         ret = -EFAULT;
526                 else
527                         ret = 0;
528                 break;
529         case VPU_IOC_SYS_SW_RESET:
530                 ret = vpu_reset();
531                 break;
532         case VPU_IOC_REG_DUMP:
533         case VPU_IOC_PHYMEM_DUMP:
534                 ret = -ENOTSUPP;
535                 break;
536         case VPU_IOC_PHYMEM_CHECK:
537         {
538                 struct vpu_mem_desc check_memory;
539
540                 if (copy_from_user(&check_memory, (void __user *)arg,
541                                         sizeof(struct vpu_mem_desc)))
542                         return -EFAULT;
543
544                 check_memory.size = 1;
545                 if (copy_to_user((void __user *)arg, &check_memory,
546                                         sizeof(struct vpu_mem_desc)))
547                         ret = -EFAULT;
548                 else
549                         ret = 0;
550                 break;
551         }
552         case VPU_IOC_LOCK_DEV:
553         {
554                 u32 lock_en;
555
556                 if (get_user(lock_en, (u32 __user *)arg))
557                         return -EFAULT;
558
559                 if (lock_en)
560                         mutex_lock(&vpu_data->lock);
561                 else
562                         mutex_unlock(&vpu_data->lock);
563                 ret = 0;
564                 break;
565         }
566         default:
567                 dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
568                 ret = -EINVAL;
569         }
570         return ret;
571 }
572
573 /*!
574  * @brief Release function for vpu file operation
575  * @return  0 on success or negative error code on error
576  */
577 static int vpu_release(struct inode *inode, struct file *filp)
578 {
579         unsigned long timeout;
580         struct vpu_user_data *user_data = filp->private_data;
581         struct vpu_priv *vpu_data = user_data->vpu_data;
582
583         mutex_lock(&vpu_data->lock);
584
585         if (open_count > 0 && !--open_count) {
586                 /* Wait for vpu go to idle state */
587                 vpu_clk_enable(vpu_data);
588                 if (READ_REG(BIT_CUR_PC)) {
589
590                         timeout = jiffies + HZ;
591                         while (READ_REG(BIT_BUSY_FLAG)) {
592                                 msleep(1);
593                                 if (time_after(jiffies, timeout)) {
594                                         dev_warn(vpu_dev, "VPU timeout during release\n");
595                                         break;
596                                 }
597                         }
598
599                         /* Clean up interrupt */
600                         cancel_work_sync(&vpu_data->work);
601                         flush_workqueue(vpu_data->workqueue);
602                         irq_status = 0;
603
604                         if (READ_REG(BIT_BUSY_FLAG)) {
605                                 if (vpu_data->soc_data->is_mx51 ||
606                                         vpu_data->soc_data->is_mx53) {
607                                         dev_err(vpu_dev,
608                                                 "fatal error: can't gate/power off when VPU is busy\n");
609                                         vpu_clk_disable(vpu_data);
610                                         mutex_unlock(&vpu_data->lock);
611                                         return -EBUSY;
612                                 }
613                                 if (vpu_data->soc_data->is_mx6dl ||
614                                         vpu_data->soc_data->is_mx6q) {
615                                         WRITE_REG(0x11, 0x10F0);
616                                         timeout = jiffies + HZ;
617                                         while (READ_REG(0x10F4) != 0x77) {
618                                                 msleep(1);
619                                                 if (time_after(jiffies, timeout))
620                                                         break;
621                                         }
622
623                                         if (READ_REG(0x10F4) != 0x77) {
624                                                 dev_err(vpu_dev,
625                                                         "fatal error: can't gate/power off when VPU is busy\n");
626                                                 WRITE_REG(0x0, 0x10F0);
627                                                 vpu_clk_disable(vpu_data);
628                                                 mutex_unlock(&vpu_data->lock);
629                                                 return -EBUSY;
630                                         }
631                                         vpu_reset();
632                                 }
633                         }
634                 }
635
636                 vpu_free_buffers();
637
638                 /* Free shared memory when vpu device is idle */
639                 vpu_free_dma_buffer(&share_mem);
640                 share_mem.cpu_addr = 0;
641                 vfree(vshare_mem.cpu_addr);
642                 vshare_mem.cpu_addr = 0;
643
644                 if (user_data->clk_enable_cnt)
645                         vpu_clk_disable(vpu_data);
646
647                 vpu_clk_disable(vpu_data);
648                 vpu_power_down();
649                 pm_runtime_put_sync_suspend(vpu_dev);
650                 devm_kfree(vpu_dev, user_data);
651         }
652         mutex_unlock(&vpu_data->lock);
653
654         return 0;
655 }
656
657 /*!
658  * @brief fasync function for vpu file operation
659  * @return  0 on success or negative error code on error
660  */
661 static int vpu_fasync(int fd, struct file *filp, int mode)
662 {
663         struct vpu_user_data *user_data = filp->private_data;
664         struct vpu_priv *vpu_data = user_data->vpu_data;
665         return fasync_helper(fd, filp, mode, &vpu_data->async_queue);
666 }
667
668 /*!
669  * @brief memory map function of harware registers for vpu file operation
670  * @return  0 on success or negative error code on error
671  */
672 static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
673 {
674         unsigned long pfn;
675
676         vm->vm_flags |= VM_IO;
677         /*
678          * Since vpu registers have been mapped with ioremap() at probe
679          * which L_PTE_XN is 1, and the same physical address must be
680          * mapped multiple times with same type, so set L_PTE_XN to 1 here.
681          * Otherwise, there may be unexpected result in video codec.
682          */
683         vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
684         pfn = phy_vpu_base_addr >> PAGE_SHIFT;
685         dev_dbg(vpu_dev, "size=0x%08lx, page no.=0x%08lx\n",
686                  vm->vm_end - vm->vm_start, pfn);
687         return remap_pfn_range(vm, vm->vm_start, pfn,
688                         vm->vm_end - vm->vm_start,
689                         vm->vm_page_prot) ? -EAGAIN : 0;
690 }
691
692 /*!
693  * @brief memory map function of memory for vpu file operation
694  * @return  0 on success or negative error code on error
695  */
696 static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
697 {
698         size_t request_size = vm->vm_end - vm->vm_start;
699
700         dev_dbg(vpu_dev, "start=0x%08lx, pgoff=0x%08lx, size=%zx\n",
701                 vm->vm_start, vm->vm_pgoff, request_size);
702
703         vm->vm_flags |= VM_IO;
704         vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
705
706         return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
707                                request_size, vm->vm_page_prot) ? -EAGAIN : 0;
708 }
709
710 /* !
711  * @brief memory map function of vmalloced share memory
712  * @return  0 on success or negative error code on error
713  */
714 static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
715 {
716         int ret;
717
718         ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
719         vm->vm_flags |= VM_IO;
720         return ret;
721 }
722 /*!
723  * @brief memory map interface for vpu file operation
724  * @return  0 on success or negative error code on error
725  */
726 static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
727 {
728         unsigned long offset;
729
730         offset = (unsigned long)vshare_mem.cpu_addr >> PAGE_SHIFT;
731
732         if (vm->vm_pgoff && (vm->vm_pgoff == offset))
733                 return vpu_map_vshare_mem(fp, vm);
734         else if (vm->vm_pgoff)
735                 return vpu_map_dma_mem(fp, vm);
736         else
737                 return vpu_map_hwregs(fp, vm);
738 }
739
/* Character-device entry points for the mxc_vpu device node. */
static const struct file_operations vpu_fops = {
        .owner = THIS_MODULE,
        .open = vpu_open,
        .unlocked_ioctl = vpu_ioctl,
        .release = vpu_release,
        .fasync = vpu_fasync,
        .mmap = vpu_mmap,
};
748
/*
 * Per-SoC quirk sets, selected by the OF match table below.
 * NOTE(review): no entry sets is_mx51/is_mx53, so the mx51/mx53 busy
 * branch in vpu_release() can never trigger — confirm this is intended.
 */
static const struct mxc_vpu_soc_data imx6dl_vpu_data = {
        .regulator_required = 1,
        .vpu_pwr_mgmnt = 1,
        .has_jpu = 1,
};

static const struct mxc_vpu_soc_data imx6q_vpu_data = {
        .quirk_subblk_en = 1,
        .regulator_required = 1,
        .vpu_pwr_mgmnt = 1,
        .has_jpu = 1,
};

/* i.MX53: no quirk flags at all. */
static const struct mxc_vpu_soc_data imx53_vpu_data = {
};

static const struct mxc_vpu_soc_data imx51_vpu_data = {
        .vpu_pwr_mgmnt = 1,
};

/* Device-tree match table; .data selects the per-SoC quirk set. */
static const struct of_device_id vpu_of_match[] = {
        { .compatible = "fsl,imx6dl-vpu", .data = &imx6dl_vpu_data, },
        { .compatible = "fsl,imx6q-vpu", .data = &imx6q_vpu_data, },
        { .compatible = "fsl,imx53-vpu", .data = &imx53_vpu_data, },
        { .compatible = "fsl,imx51-vpu", .data = &imx51_vpu_data, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, vpu_of_match);
777
778 /*!
779  * This function is called by the driver framework to initialize the vpu device.
780  * @param   dev The device structure for the vpu passed in by the framework.
781  * @return   0 on success or negative error code on error
782  */
783 static int vpu_dev_probe(struct platform_device *pdev)
784 {
785         int err = 0;
786         struct device *temp_class;
787         struct resource *res;
788         unsigned long addr = 0;
789         struct device_node *np = pdev->dev.of_node;
790         u32 iramsize;
791         struct vpu_priv *drv_data;
792         const struct of_device_id *of_id = of_match_device(vpu_of_match,
793                                                         &pdev->dev);
794         const struct mxc_vpu_soc_data *soc_data = of_id->data;
795
796         drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
797         if (drv_data == NULL)
798                 return -ENOMEM;
799
800         drv_data->soc_data = soc_data;
801         mutex_init(&drv_data->lock);
802
803         init_waitqueue_head(&vpu_queue);
804         drv_data->workqueue = create_workqueue("vpu_wq");
805         INIT_WORK(&drv_data->work, vpu_worker_callback);
806
807         err = of_property_read_u32(np, "iramsize", &iramsize);
808         if (!err && iramsize) {
809                 iram_pool = of_get_named_gen_pool(np, "iram", 0);
810                 if (!iram_pool) {
811                         dev_err(&pdev->dev, "iram pool not available\n");
812                         return -ENOMEM;
813                 }
814
815                 iram_base = gen_pool_alloc(iram_pool, iramsize);
816                 if (!iram_base) {
817                         dev_err(&pdev->dev, "unable to alloc iram\n");
818                         return -ENOMEM;
819                 }
820
821                 addr = gen_pool_virt_to_phys(iram_pool, iram_base);
822         }
823
824         if (addr == 0)
825                 iram.start = iram.end = 0;
826         else {
827                 iram.start = addr;
828                 iram.end = addr + iramsize - 1;
829         }
830
831         vpu_dev = &pdev->dev;
832
833         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
834         vpu_base = devm_ioremap_resource(&pdev->dev, res);
835         if (IS_ERR(vpu_base))
836                 return PTR_ERR(vpu_base);
837         phy_vpu_base_addr = res->start;
838
839         vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
840         if (vpu_major < 0) {
841                 dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
842                 return vpu_major;
843         }
844
845         vpu_class = class_create(THIS_MODULE, "mxc_vpu");
846         if (IS_ERR(vpu_class)) {
847                 err = PTR_ERR(vpu_class);
848                 goto err_out_chrdev;
849         }
850
851         temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
852                                    NULL, "mxc_vpu");
853         if (IS_ERR(temp_class)) {
854                 err = PTR_ERR(temp_class);
855                 goto err_out_class;
856         }
857
858         vpu_clk = clk_get(&pdev->dev, "vpu_clk");
859         if (IS_ERR(vpu_clk)) {
860                 err = PTR_ERR(vpu_clk);
861                 goto err_out_class;
862         }
863
864         vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
865         if (vpu_ipi_irq < 0) {
866                 dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
867                 err = vpu_ipi_irq;
868                 goto err_out_class;
869         }
870         err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
871                           drv_data);
872         if (err)
873                 goto err_out_class;
874
875         vpu_regulator = devm_regulator_get(vpu_dev, "pu");
876         if (IS_ERR(vpu_regulator)) {
877                 if (drv_data->soc_data->regulator_required) {
878                         dev_err(vpu_dev, "failed to get vpu power\n");
879                         goto err_out_class;
880                 } else {
                        /* regulator_get will return an error on MX5x;
                         * just ignore it everywhere
                         */
884                         dev_warn(vpu_dev, "failed to get vpu power\n");
885                 }
886         }
887
888         platform_set_drvdata(pdev, drv_data);
889
890         if (drv_data->soc_data->has_jpu) {
891                 vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
892                 if (vpu_jpu_irq < 0) {
893                         dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
894                         err = vpu_jpu_irq;
895                         goto err_out_class;
896                 }
897                 err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
898                                 "VPU_JPG_IRQ", drv_data);
899                 if (err)
900                         goto err_out_class;
901         }
902
903         pm_runtime_enable(&pdev->dev);
904         vpu_data = drv_data;
905
906         dev_info(vpu_dev, "VPU initialized\n");
907         return 0;
908
909 err_out_class:
910         device_destroy(vpu_class, MKDEV(vpu_major, 0));
911         class_destroy(vpu_class);
912 err_out_chrdev:
913         unregister_chrdev(vpu_major, "mxc_vpu");
914         return err;
915 }
916
917 static int vpu_dev_remove(struct platform_device *pdev)
918 {
919         struct vpu_priv *vpu_data = platform_get_drvdata(pdev);
920
921         pm_runtime_disable(&pdev->dev);
922
923         free_irq(vpu_ipi_irq, &vpu_data);
924 #ifdef MXC_VPU_HAS_JPU
925         free_irq(vpu_jpu_irq, &vpu_data);
926 #endif
927         cancel_work_sync(&vpu_data->work);
928         flush_workqueue(vpu_data->workqueue);
929         destroy_workqueue(vpu_data->workqueue);
930
931         if (iram.start)
932                 gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);
933
934         if (vpu_major > 0) {
935                 device_destroy(vpu_class, MKDEV(vpu_major, 0));
936                 class_destroy(vpu_class);
937                 unregister_chrdev(vpu_major, "mxc_vpu");
938                 vpu_major = 0;
939         }
940
941         vpu_free_dma_buffer(&bitwork_mem);
942         vpu_free_dma_buffer(&pic_para_mem);
943         vpu_free_dma_buffer(&user_data_mem);
944
945         /* reset VPU state */
946         vpu_power_up();
947         vpu_clk_enable(vpu_data);
948         vpu_reset();
949         vpu_clk_disable(vpu_data);
950         vpu_power_down();
951
952         clk_put(vpu_clk);
953         return 0;
954 }
955
#ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend callback.
 *
 * If the device is open, waits for the VPU to go idle, then (except on
 * MX53) saves the 64 registers starting at BIT_CODE_BUF_ADDR plus the
 * current program counter so vpu_resume() can restore them, and gates
 * the clock and power.  Returns -EAGAIN if the VPU is still busy after
 * roughly one second, 0 otherwise.
 */
static int vpu_suspend(struct device *dev)
{
        struct vpu_priv *vpu_data = dev_get_drvdata(dev);
        unsigned long timeout;

        mutex_lock(&vpu_data->lock);

        if (open_count) {
                /* Wait for the VPU to go idle; give up and refuse the
                 * suspend (-EAGAIN) if it is still busy after ~1 second.
                 */
                timeout = jiffies + HZ;
                while (READ_REG(BIT_BUSY_FLAG)) {
                        msleep(1);
                        if (time_after(jiffies, timeout)) {
                                mutex_unlock(&vpu_data->lock);
                                return -EAGAIN;
                        }
                }

                /* MX53 keeps VPU state across suspend: nothing to save */
                if (vpu_data->soc_data->is_mx53) {
                        mutex_unlock(&vpu_data->lock);
                        return 0;
                }

                if (bitwork_mem.cpu_addr != 0) {
                        int i;

                        /* Save 64 registers from BIT_CODE_BUF_ADDR */
                        for (i = 0; i < 64; i++)
                                regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
                        /* PC is saved so resume knows whether to restart */
                        pc_before_suspend = READ_REG(BIT_CUR_PC);
                }

                vpu_clk_disable(vpu_data);
                /* If VPU is working before suspend, disable
                 * regulator to make usecount right.
                 */
                vpu_power_down();
        }

        mutex_unlock(&vpu_data->lock);
        return 0;
}
1001
/*
 * System-sleep resume callback: undo vpu_suspend().
 *
 * If the device is open, re-enables power/clock and -- unless the VPU
 * kept state (MX53, or PC still non-zero) -- restores the 64 saved
 * registers, re-downloads the 4096-byte boot code from the bitwork
 * buffer, and restarts the core if it was running before suspend.
 * Always returns 0.
 */
static int vpu_resume(struct device *dev)
{
        int i;
        struct vpu_priv *vpu_data = dev_get_drvdata(dev);

        mutex_lock(&vpu_data->lock);

        if (open_count) {
                /* MX53 retained its state; just ungate the clock */
                if (vpu_data->soc_data->is_mx53) {
                        vpu_clk_enable(vpu_data);
                        goto out;
                }

                /* If VPU is working before suspend, enable
                 * regulator to make usecount right.
                 */
                vpu_power_up();

                if (bitwork_mem.cpu_addr != NULL) {
                        u32 *p = bitwork_mem.cpu_addr;
                        u32 data, pc;
                        u16 data_hi;
                        u16 data_lo;

                        vpu_clk_enable(vpu_data);

                        /* Non-zero PC means power was never cut, so the
                         * saved state is still in the core: skip restore.
                         */
                        pc = READ_REG(BIT_CUR_PC);
                        if (pc) {
                                dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);
                                goto out;
                        }

                        /* Restore registers */
                        for (i = 0; i < 64; i++)
                                WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));

                        WRITE_REG(0x0, BIT_RESET_CTRL);
                        WRITE_REG(0x0, BIT_CODE_RUN);

                        /* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
                        if (vpu_data->soc_data->quirk_subblk_en)
                                WRITE_REG(0x0, MBC_SET_SUBBLK_EN);

                        /*
                         * Re-load boot code, from the codebuffer in external RAM.
                         * Thankfully, we only need 4096 bytes, same for all platforms.
                         * Each 32-bit word is split into two 16-bit halves and
                         * pushed through BIT_CODE_DOWN with its target index in
                         * the upper 16 bits; note the pair-swapped ordering.
                         */
                        for (i = 0; i < 2048; i += 4) {
                                data = p[(i / 2) + 1];
                                data_hi = (data >> 16) & 0xFFFF;
                                data_lo = data & 0xFFFF;
                                WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
                                WRITE_REG(((i + 1) << 16) | data_lo,
                                                BIT_CODE_DOWN);

                                data = p[i / 2];
                                data_hi = (data >> 16) & 0xFFFF;
                                data_lo = data & 0xFFFF;
                                WRITE_REG(((i + 2) << 16) | data_hi,
                                                BIT_CODE_DOWN);
                                WRITE_REG(((i + 3) << 16) | data_lo,
                                                BIT_CODE_DOWN);
                        }

                        /* Restart the core only if it was running at suspend.
                         * NOTE(review): this busy-wait has no timeout, unlike
                         * the one in vpu_suspend() -- a wedged core would hang
                         * resume here; consider adding a jiffies-based bound.
                         */
                        if (pc_before_suspend) {
                                WRITE_REG(0x1, BIT_BUSY_FLAG);
                                WRITE_REG(0x1, BIT_CODE_RUN);
                                while (READ_REG(BIT_BUSY_FLAG))
                                        ;
                        } else {
                                dev_warn(vpu_dev, "PC=0 before suspend\n");
                        }
                }
        }
out:
        mutex_unlock(&vpu_data->lock);
        return 0;
}
1080
/* Bind the sleep callbacks only when PM_SLEEP support is configured in */
static SIMPLE_DEV_PM_OPS(vpu_pm_ops, vpu_suspend, vpu_resume);
#define VPU_PM_OPS &vpu_pm_ops
#else
/* No sleep support: register no PM callbacks */
#define VPU_PM_OPS NULL
#endif /* !CONFIG_PM_SLEEP */
1086
1087 /*! Driver definition
1088  *
1089  */
1090 static struct platform_driver mxcvpu_driver = {
1091         .driver = {
1092                 .name = "mxc_vpu",
1093                 .of_match_table = vpu_of_match,
1094                 .pm = VPU_PM_OPS,
1095         },
1096         .probe = vpu_dev_probe,
1097         .remove = vpu_dev_remove,
1098 };
1099
/* Registers the driver with the platform bus; expands to module init/exit */
module_platform_driver(mxcvpu_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
MODULE_LICENSE("GPL");