mxc: vpu: improve clk enable/disable handling
drivers/mxc/vpu/mxc_vpu.c
/*
 * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
 */

/*
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

/*!
 * @file mxc_vpu.c
 *
 * @brief VPU system initialization and file operation implementation
 *
 * @ingroup VPU
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/stat.h>
#include <linux/platform_device.h>
#include <linux/kdev_t.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fsl_devices.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>
#include <linux/page-flags.h>
#include <linux/mm_types.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/mxc_vpu.h>

/* Define a new pgprot value combining uncached and XN (execute-never) */
#define pgprot_noncachedxn(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)

struct vpu_priv {
	struct fasync_struct *async_queue;
	struct work_struct work;
	struct workqueue_struct *workqueue;
	struct mutex lock;
	const struct mxc_vpu_soc_data *soc_data;
	int clk_enabled;
};

struct vpu_user_data {
	struct vpu_priv *vpu_data;
	int clk_enable_cnt;
};

/* To track the allocated memory buffer */
struct memalloc_record {
	struct list_head list;
	struct vpu_mem_desc mem;
};

struct iram_setting {
	u32 start;
	u32 end;
};

struct mxc_vpu_soc_data {
	unsigned vpu_pwr_mgmnt:1,
		regulator_required:1,
		quirk_subblk_en:1,
		is_mx51:1,
		is_mx53:1,
		is_mx6dl:1,
		is_mx6q:1,
		has_jpu:1;
};

static struct gen_pool *iram_pool;
static u32 iram_base;

static LIST_HEAD(mem_list);

static int vpu_major;
static struct class *vpu_class;
static struct vpu_priv *vpu_data;
static u8 open_count;
static struct clk *vpu_clk;
static struct vpu_mem_desc bitwork_mem;
static struct vpu_mem_desc pic_para_mem;
static struct vpu_mem_desc user_data_mem;
static struct vpu_mem_desc share_mem;
static struct vpu_mem_desc vshare_mem;

static void __iomem *vpu_base;
static int vpu_ipi_irq;
static u32 phy_vpu_base_addr;

static struct device *vpu_dev;

/* IRAM setting */
static struct iram_setting iram;

/* implement the blocking ioctl */
static int irq_status;
static int codec_done;
static wait_queue_head_t vpu_queue;

static int vpu_jpu_irq;

#ifdef CONFIG_PM_SLEEP
static unsigned int regBk[64];
static unsigned int pc_before_suspend;
#endif
static struct regulator *vpu_regulator;

#define READ_REG(x)		readl_relaxed(vpu_base + (x))
#define WRITE_REG(val, x)	writel_relaxed(val, vpu_base + (x))

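/*
 * Clock handling: vpu_clk_enable()/vpu_clk_disable() keep a device-wide
 * enable count in vpu_priv::clk_enabled; each opener additionally tracks
 * its own outstanding enables in vpu_user_data::clk_enable_cnt so they
 * can be balanced at release time. The helpers do no locking themselves;
 * callers serialize them through vpu_priv::lock.
 */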
static int vpu_clk_enable(struct vpu_priv *vpu_data)
{
	if (WARN_ON(vpu_data->clk_enabled < 0))
		return -EINVAL;

	if (vpu_data->clk_enabled++ == 0) {
		int ret = clk_prepare_enable(vpu_clk);

		/* don't count an enable attempt that failed */
		if (ret)
			vpu_data->clk_enabled--;
		return ret;
	}

	return 0;
}

static int vpu_clk_disable(struct vpu_priv *vpu_data)
{
	if (WARN_ON(vpu_data->clk_enabled <= 0))
		return -EINVAL;

	if (--vpu_data->clk_enabled == 0)
		clk_disable_unprepare(vpu_clk);
	return 0;
}

static inline int vpu_reset(void)
{
	return device_reset(vpu_dev);
}

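/*
 * On SoCs whose soc_data does not require a PU regulator (e.g. i.MX5x),
 * vpu_regulator holds an ERR_PTR from probe and the IS_ERR() checks
 * below make power up/down no-ops.
 */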
static void vpu_power_up(void)
{
	int ret;

	if (IS_ERR(vpu_regulator))
		return;

	ret = regulator_enable(vpu_regulator);
	if (ret)
		dev_err(vpu_dev, "failed to power up vpu: %d\n", ret);
}

static void vpu_power_down(void)
{
	int ret;

	if (IS_ERR(vpu_regulator))
		return;

	ret = regulator_disable(vpu_regulator);
	if (ret)
		dev_err(vpu_dev, "failed to power down vpu: %d\n", ret);
}

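/*
 * The helpers below manage page-aligned coherent DMA buffers; phy_addr is
 * handed back to userspace via the PHYMEM ioctls and doubles as the mmap
 * offset when the buffer is mapped into a process (see vpu_map_dma_mem()).
 */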
/*!
 * Private function to alloc dma buffer
 * @return 0 on success
 */
static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
{
	mem->cpu_addr = dma_alloc_coherent(vpu_dev, PAGE_ALIGN(mem->size),
					&mem->phy_addr,
					GFP_DMA | GFP_KERNEL);
	dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = %p\n", mem->cpu_addr);
	if (mem->cpu_addr == NULL) {
		dev_err(vpu_dev, "Physical memory allocation error!\n");
		return -ENOMEM;
	}
	return 0;
}

/*!
 * Private function to free dma buffer
 */
static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
{
	if (mem->cpu_addr != NULL)
		dma_free_coherent(vpu_dev, PAGE_ALIGN(mem->size),
				mem->cpu_addr, mem->phy_addr);
}

/*!
 * Private function to free buffers
 * @return 0 on success
 */
static int vpu_free_buffers(void)
{
	struct memalloc_record *rec, *n;
	struct vpu_mem_desc mem;

	list_for_each_entry_safe(rec, n, &mem_list, list) {
		mem = rec->mem;
		if (mem.cpu_addr != NULL) {
			vpu_free_dma_buffer(&mem);
			dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
			/* delete from list */
			list_del(&rec->list);
			kfree(rec);
		}
	}

	return 0;
}

static inline void vpu_worker_callback(struct work_struct *w)
{
	struct vpu_priv *dev = container_of(w, struct vpu_priv, work);

	if (dev->async_queue)
		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);

	irq_status = 1;
	/*
	 * The clock is gated on by userspace when dec/enc starts and gated
	 * off again once the codec is done; here we only clear the
	 * codec_done flag.
	 */
	if (codec_done)
		codec_done = 0;

	wake_up_interruptible(&vpu_queue);
}
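
/*
 * The hard IRQ handlers below only acknowledge the interrupt and record
 * whether the codec finished; waking waiters and delivering SIGIO is
 * deferred to vpu_worker_callback() via the driver workqueue.
 */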

/*!
 * @brief vpu interrupt handler
 */
static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
{
	struct vpu_priv *dev = dev_id;
	unsigned long reg;

	reg = READ_REG(BIT_INT_REASON);
	if (reg & 0x8)
		codec_done = 1;
	WRITE_REG(0x1, BIT_INT_CLEAR);

	queue_work(dev->workqueue, &dev->work);

	return IRQ_HANDLED;
}

/*!
 * @brief vpu jpu interrupt handler
 */
static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
{
	struct vpu_priv *dev = dev_id;
	unsigned long reg;

	reg = READ_REG(MJPEG_PIC_STATUS_REG);
	if (reg & 0x3)
		codec_done = 1;

	queue_work(dev->workqueue, &dev->work);

	return IRQ_HANDLED;
}

/*!
 * @brief open function for vpu file operation
 *
 * @return  0 on success or negative error code on error
 */
static int vpu_open(struct inode *inode, struct file *filp)
{
	struct vpu_user_data *user_data = devm_kzalloc(vpu_dev,
						sizeof(*user_data),
						GFP_KERNEL);
	if (user_data == NULL)
		return -ENOMEM;

	user_data->vpu_data = vpu_data;

	mutex_lock(&vpu_data->lock);

	if (open_count++ == 0) {
		pm_runtime_get_sync(vpu_dev);
		vpu_power_up();
	}

	filp->private_data = user_data;
	mutex_unlock(&vpu_data->lock);
	return 0;
}
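
/*
 * Illustrative userspace sequence (a sketch only; names such as buf_size
 * and timeout_ms are placeholders, and the real call order is dictated
 * by the VPU library):
 *
 *	int fd = open("/dev/mxc_vpu", O_RDWR);
 *	struct vpu_mem_desc mem = { .size = buf_size };
 *	ioctl(fd, VPU_IOC_PHYMEM_ALLOC, &mem);
 *	void *va = mmap(NULL, mem.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, mem.phy_addr);
 *	u32 on = 1, off = 0;
 *	ioctl(fd, VPU_IOC_CLKGATE_SETTING, &on);   // gate clock on
 *	// ... start dec/enc through the register mapping ...
 *	ioctl(fd, VPU_IOC_WAIT4INT, timeout_ms);   // wait for completion
 *	ioctl(fd, VPU_IOC_CLKGATE_SETTING, &off);  // gate clock off
 */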

/*!
 * @brief IO ctrl function for vpu file operation
 * @param cmd IO ctrl command
 * @return  0 on success or negative error code on error
 */
static long vpu_ioctl(struct file *filp, u_int cmd, u_long arg)
{
	int ret = -EINVAL;
	struct vpu_user_data *user_data = filp->private_data;
	struct vpu_priv *vpu_data = user_data->vpu_data;

	switch (cmd) {
	case VPU_IOC_PHYMEM_ALLOC:
	{
		struct memalloc_record *rec;

		rec = kzalloc(sizeof(*rec), GFP_KERNEL);
		if (!rec)
			return -ENOMEM;

		if (copy_from_user(&rec->mem,
					(void __user *)arg,
					sizeof(struct vpu_mem_desc))) {
			kfree(rec);
			return -EFAULT;
		}

		dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
			rec->mem.size);

		ret = vpu_alloc_dma_buffer(&rec->mem);
		if (ret) {
			kfree(rec);
			return ret;
		}
		if (copy_to_user((void __user *)arg, &rec->mem,
					sizeof(struct vpu_mem_desc))) {
			/* don't leak the DMA buffer we just allocated */
			vpu_free_dma_buffer(&rec->mem);
			kfree(rec);
			return -EFAULT;
		}

		mutex_lock(&vpu_data->lock);
		list_add(&rec->list, &mem_list);
		mutex_unlock(&vpu_data->lock);

		break;
	}
	case VPU_IOC_PHYMEM_FREE:
	{
		struct memalloc_record *rec, *n;
		struct vpu_mem_desc vpu_mem;

		if (copy_from_user(&vpu_mem,
					(void __user *)arg,
					sizeof(struct vpu_mem_desc)))
			return -EFAULT;

		dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = %p\n",
			vpu_mem.cpu_addr);
		if (vpu_mem.cpu_addr != NULL)
			vpu_free_dma_buffer(&vpu_mem);

		mutex_lock(&vpu_data->lock);
		list_for_each_entry_safe(rec, n, &mem_list, list) {
			if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
				list_del(&rec->list);
				/*
				 * only free a record that was found; rec
				 * is not valid after a full traversal
				 */
				kfree(rec);
				break;
			}
		}
		mutex_unlock(&vpu_data->lock);

		break;
	}
	case VPU_IOC_WAIT4INT:
	{
		u_long timeout = arg;

		ret = wait_event_interruptible_timeout(vpu_queue,
						irq_status != 0,
						msecs_to_jiffies(timeout));
		if (ret == 0) {
			dev_warn(vpu_dev, "VPU blocking: timeout.\n");
			ret = -ETIMEDOUT;
		} else if (signal_pending(current)) {
			dev_warn(vpu_dev, "VPU wait interrupted by signal.\n");
			ret = -ERESTARTSYS;
		} else {
			irq_status = 0;
			ret = 0;
		}
		break;
	}
	case VPU_IOC_IRAM_SETTING:
		ret = copy_to_user((void __user *)arg, &iram,
				sizeof(struct iram_setting));
		if (ret)
			ret = -EFAULT;

		break;
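	/*
	 * Refcounted clock gating: an enable request bumps both the
	 * per-user and the device-wide enable count, a disable drops both,
	 * and a disable is refused once this user's count is zero so one
	 * client cannot release another client's clock reference.
	 */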
	case VPU_IOC_CLKGATE_SETTING:
	{
		u32 clkgate_en;

		if (get_user(clkgate_en, (u32 __user *)arg))
			return -EFAULT;

		mutex_lock(&vpu_data->lock);
		if (clkgate_en) {
			ret = vpu_clk_enable(vpu_data);
			if (ret == 0)
				user_data->clk_enable_cnt++;
		} else if (user_data->clk_enable_cnt == 0) {
			ret = -EINVAL;
		} else {
			user_data->clk_enable_cnt--;
			ret = vpu_clk_disable(vpu_data);
		}
		mutex_unlock(&vpu_data->lock);
		break;
	}
	case VPU_IOC_GET_SHARE_MEM:
		mutex_lock(&vpu_data->lock);
		if (share_mem.cpu_addr == NULL) {
			if (copy_from_user(&share_mem,
						(void __user *)arg,
						sizeof(struct vpu_mem_desc))) {
				mutex_unlock(&vpu_data->lock);
				return -EFAULT;
			}
			ret = vpu_alloc_dma_buffer(&share_mem);
			if (ret) {
				mutex_unlock(&vpu_data->lock);
				return ret;
			}
		}
		if (copy_to_user((void __user *)arg, &share_mem,
					sizeof(struct vpu_mem_desc)))
			ret = -EFAULT;
		else
			ret = 0;
		mutex_unlock(&vpu_data->lock);
		break;
	case VPU_IOC_REQ_VSHARE_MEM:
		mutex_lock(&vpu_data->lock);
		if (vshare_mem.cpu_addr == NULL) {
			if (copy_from_user(&vshare_mem,
						(void __user *)arg,
						sizeof(struct vpu_mem_desc))) {
				mutex_unlock(&vpu_data->lock);
				return -EFAULT;
			}
			vshare_mem.cpu_addr = vmalloc_user(vshare_mem.size);
			if (vshare_mem.cpu_addr == NULL) {
				mutex_unlock(&vpu_data->lock);
				return -ENOMEM;
			}
		}
		if (copy_to_user((void __user *)arg, &vshare_mem,
					sizeof(struct vpu_mem_desc)))
			ret = -EFAULT;
		else
			ret = 0;
		mutex_unlock(&vpu_data->lock);
		break;
	case VPU_IOC_GET_WORK_ADDR:
		if (bitwork_mem.cpu_addr == NULL) {
			if (copy_from_user(&bitwork_mem,
						(void __user *)arg,
						sizeof(struct vpu_mem_desc)))
				return -EFAULT;

			ret = vpu_alloc_dma_buffer(&bitwork_mem);
			if (ret)
				return ret;
		}
		if (copy_to_user((void __user *)arg, &bitwork_mem,
					sizeof(struct vpu_mem_desc)))
			ret = -EFAULT;
		else
			ret = 0;
		break;
	/*
	 * The following two ioctls are used when the user allocates a
	 * working buffer and registers it with the vpu driver.
	 */
	case VPU_IOC_QUERY_BITWORK_MEM:
		if (copy_to_user((void __user *)arg, &bitwork_mem,
					sizeof(struct vpu_mem_desc)))
			ret = -EFAULT;
		else
			ret = 0;
		break;
	case VPU_IOC_SET_BITWORK_MEM:
		if (copy_from_user(&bitwork_mem,
					(void __user *)arg,
					sizeof(struct vpu_mem_desc)))
			ret = -EFAULT;
		else
			ret = 0;
		break;
	case VPU_IOC_SYS_SW_RESET:
		ret = vpu_reset();
		break;
	case VPU_IOC_REG_DUMP:
	case VPU_IOC_PHYMEM_DUMP:
		ret = 0;
		break;
	case VPU_IOC_PHYMEM_CHECK:
	{
		struct vpu_mem_desc check_memory;

		if (copy_from_user(&check_memory, (void __user *)arg,
					sizeof(struct vpu_mem_desc)))
			return -EFAULT;

		check_memory.size = 1;
		if (copy_to_user((void __user *)arg, &check_memory,
					sizeof(struct vpu_mem_desc)))
			ret = -EFAULT;
		else
			ret = 0;
		break;
	}
	case VPU_IOC_LOCK_DEV:
	{
		u32 lock_en;

		if (get_user(lock_en, (u32 __user *)arg))
			return -EFAULT;

		if (lock_en)
			mutex_lock(&vpu_data->lock);
		else
			mutex_unlock(&vpu_data->lock);
		ret = 0;
		break;
	}
	default:
		dev_err(vpu_dev, "No such IOCTL, cmd is %u\n", cmd);
	}
	return ret;
}

/*!
 * @brief Release function for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_release(struct inode *inode, struct file *filp)
{
	unsigned long timeout;
	struct vpu_user_data *user_data = filp->private_data;
	struct vpu_priv *vpu_data = user_data->vpu_data;

	mutex_lock(&vpu_data->lock);

	/* drop any clock enables this opener still holds */
	while (user_data->clk_enable_cnt > 0) {
		user_data->clk_enable_cnt--;
		vpu_clk_disable(vpu_data);
	}

	if (open_count > 0 && !--open_count) {
		/* Wait for the vpu to go to idle state */
		vpu_clk_enable(vpu_data);
		if (READ_REG(BIT_CUR_PC)) {
			timeout = jiffies + HZ;
			while (READ_REG(BIT_BUSY_FLAG)) {
				msleep(1);
				if (time_after(jiffies, timeout)) {
					dev_warn(vpu_dev, "VPU timeout during release\n");
					break;
				}
			}

			/* Clean up interrupt */
			cancel_work_sync(&vpu_data->work);
			flush_workqueue(vpu_data->workqueue);
			irq_status = 0;

			if (READ_REG(BIT_BUSY_FLAG)) {
				if (vpu_data->soc_data->is_mx51 ||
				    vpu_data->soc_data->is_mx53) {
					dev_err(vpu_dev,
						"fatal error: can't gate/power off when VPU is busy\n");
					vpu_clk_disable(vpu_data);
					mutex_unlock(&vpu_data->lock);
					return -EBUSY;
				}
				if (vpu_data->soc_data->is_mx6dl ||
				    vpu_data->soc_data->is_mx6q) {
					WRITE_REG(0x11, 0x10F0);
					timeout = jiffies + HZ;
					while (READ_REG(0x10F4) != 0x77) {
						msleep(1);
						if (time_after(jiffies, timeout))
							break;
					}

					if (READ_REG(0x10F4) != 0x77) {
						dev_err(vpu_dev,
							"fatal error: can't gate/power off when VPU is busy\n");
						WRITE_REG(0x0, 0x10F0);
						vpu_clk_disable(vpu_data);
						mutex_unlock(&vpu_data->lock);
						return -EBUSY;
					}
					vpu_reset();
				}
			}
		}

		vpu_free_buffers();

		/* Free shared memory when vpu device is idle */
		vpu_free_dma_buffer(&share_mem);
		share_mem.cpu_addr = NULL;
		vfree(vshare_mem.cpu_addr);
		vshare_mem.cpu_addr = NULL;

		vpu_clk_disable(vpu_data);
		vpu_power_down();
		pm_runtime_put_sync_suspend(vpu_dev);
	}
	mutex_unlock(&vpu_data->lock);
	/* free the per-opener state for every closer, not just the last */
	devm_kfree(vpu_dev, user_data);

	return 0;
}

/*!
 * @brief fasync function for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_fasync(int fd, struct file *filp, int mode)
{
	struct vpu_user_data *user_data = filp->private_data;
	struct vpu_priv *vpu_data = user_data->vpu_data;

	return fasync_helper(fd, filp, mode, &vpu_data->async_queue);
}

/*!
 * @brief memory map function of hardware registers for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
{
	unsigned long pfn;

	vm->vm_flags |= VM_IO;
	/*
	 * The vpu registers were mapped with ioremap() at probe time,
	 * which sets L_PTE_XN. The same physical address must always be
	 * mapped with the same memory type, so set L_PTE_XN here as well;
	 * otherwise the video codec may misbehave.
	 */
	vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
	pfn = phy_vpu_base_addr >> PAGE_SHIFT;
	dev_dbg(vpu_dev, "size=0x%08lx, page no.=0x%08lx\n",
		vm->vm_end - vm->vm_start, pfn);
	return remap_pfn_range(vm, vm->vm_start, pfn,
			vm->vm_end - vm->vm_start,
			vm->vm_page_prot) ? -EAGAIN : 0;
}

/*!
 * @brief memory map function of memory for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
{
	size_t request_size = vm->vm_end - vm->vm_start;

	dev_dbg(vpu_dev, "start=0x%08lx, pgoff=0x%08lx, size=%zx\n",
		vm->vm_start, vm->vm_pgoff, request_size);

	vm->vm_flags |= VM_IO;
	vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);

	return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
			       request_size, vm->vm_page_prot) ? -EAGAIN : 0;
}

/*!
 * @brief memory map function of vmalloced share memory
 * @return  0 on success or negative error code on error
 */
static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
{
	int ret;

	ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
	vm->vm_flags |= VM_IO;
	return ret;
}
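
/*
 * mmap dispatch: an offset equal to the page offset of the vmalloc'ed
 * share memory maps that buffer, any other non-zero offset is treated as
 * the physical page frame of a DMA buffer, and offset zero maps the VPU
 * register block.
 */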
/*!
 * @brief memory map interface for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
{
	unsigned long offset;

	offset = (unsigned long)vshare_mem.cpu_addr >> PAGE_SHIFT;

	if (vm->vm_pgoff && (vm->vm_pgoff == offset))
		return vpu_map_vshare_mem(fp, vm);
	else if (vm->vm_pgoff)
		return vpu_map_dma_mem(fp, vm);
	else
		return vpu_map_hwregs(fp, vm);
}

static const struct file_operations vpu_fops = {
	.owner = THIS_MODULE,
	.open = vpu_open,
	.unlocked_ioctl = vpu_ioctl,
	.release = vpu_release,
	.fasync = vpu_fasync,
	.mmap = vpu_mmap,
};

static const struct mxc_vpu_soc_data imx6dl_vpu_data = {
	.regulator_required = 1,
	.vpu_pwr_mgmnt = 1,
	.has_jpu = 1,
};

static const struct mxc_vpu_soc_data imx6q_vpu_data = {
	.quirk_subblk_en = 1,
	.regulator_required = 1,
	.vpu_pwr_mgmnt = 1,
	.has_jpu = 1,
};

static const struct mxc_vpu_soc_data imx53_vpu_data = {
};

static const struct mxc_vpu_soc_data imx51_vpu_data = {
	.vpu_pwr_mgmnt = 1,
};

static const struct of_device_id vpu_of_match[] = {
	{ .compatible = "fsl,imx6dl-vpu", .data = &imx6dl_vpu_data, },
	{ .compatible = "fsl,imx6q-vpu", .data = &imx6q_vpu_data, },
	{ .compatible = "fsl,imx53-vpu", .data = &imx53_vpu_data, },
	{ .compatible = "fsl,imx51-vpu", .data = &imx51_vpu_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, vpu_of_match);

/*!
 * This function is called by the driver framework to initialize the vpu device.
 * @param   pdev The platform device structure for the vpu passed in by the framework.
 * @return   0 on success or negative error code on error
 */
static int vpu_dev_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *temp_class;
	struct resource *res;
	unsigned long addr = 0;
	struct device_node *np = pdev->dev.of_node;
	u32 iramsize;
	struct vpu_priv *drv_data;
	const struct of_device_id *of_id = of_match_device(vpu_of_match,
							&pdev->dev);
	const struct mxc_vpu_soc_data *soc_data;

	if (!of_id)
		return -ENODEV;
	soc_data = of_id->data;

	drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
	if (drv_data == NULL)
		return -ENOMEM;

	drv_data->soc_data = soc_data;
	mutex_init(&drv_data->lock);

	init_waitqueue_head(&vpu_queue);
	drv_data->workqueue = create_workqueue("vpu_wq");
	if (!drv_data->workqueue)
		return -ENOMEM;
	INIT_WORK(&drv_data->work, vpu_worker_callback);

	err = of_property_read_u32(np, "iramsize", &iramsize);
	if (!err && iramsize) {
		iram_pool = of_get_named_gen_pool(np, "iram", 0);
		if (!iram_pool) {
			dev_err(&pdev->dev, "iram pool not available\n");
			return -ENOMEM;
		}

		iram_base = gen_pool_alloc(iram_pool, iramsize);
		if (!iram_base) {
			dev_err(&pdev->dev, "unable to alloc iram\n");
			return -ENOMEM;
		}

		addr = gen_pool_virt_to_phys(iram_pool, iram_base);
	}

	if (addr == 0) {
		iram.start = iram.end = 0;
	} else {
		iram.start = addr;
		iram.end = addr + iramsize - 1;
	}

	vpu_dev = &pdev->dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
	vpu_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(vpu_base))
		return PTR_ERR(vpu_base);
	phy_vpu_base_addr = res->start;

	vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
	if (vpu_major < 0) {
		dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
		return vpu_major;
	}

	vpu_class = class_create(THIS_MODULE, "mxc_vpu");
	if (IS_ERR(vpu_class)) {
		err = PTR_ERR(vpu_class);
		goto err_out_chrdev;
	}

	temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
				   NULL, "mxc_vpu");
	if (IS_ERR(temp_class)) {
		err = PTR_ERR(temp_class);
		goto err_out_class;
	}

	vpu_clk = clk_get(&pdev->dev, "vpu_clk");
	if (IS_ERR(vpu_clk)) {
		err = PTR_ERR(vpu_clk);
		goto err_out_class;
	}

	vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
	if (vpu_ipi_irq < 0) {
		dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
		err = vpu_ipi_irq;
		goto err_out_clk;
	}
	err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
			  drv_data);
	if (err)
		goto err_out_clk;

	vpu_regulator = devm_regulator_get(vpu_dev, "pu");
	if (IS_ERR(vpu_regulator)) {
		if (drv_data->soc_data->regulator_required) {
			dev_err(vpu_dev, "failed to get vpu power\n");
			err = PTR_ERR(vpu_regulator);
			goto err_out_irq;
		}
		/*
		 * regulator_get will return an error on MX5x;
		 * just ignore it everywhere
		 */
		dev_warn(vpu_dev, "failed to get vpu power\n");
	}

	platform_set_drvdata(pdev, drv_data);

	if (drv_data->soc_data->has_jpu) {
		vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
		if (vpu_jpu_irq < 0) {
			dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
			err = vpu_jpu_irq;
			goto err_out_irq;
		}
		err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
				"VPU_JPG_IRQ", drv_data);
		if (err)
			goto err_out_irq;
	}

	pm_runtime_enable(&pdev->dev);
	vpu_data = drv_data;

	dev_info(vpu_dev, "VPU initialized\n");
	return 0;

err_out_irq:
	free_irq(vpu_ipi_irq, drv_data);
err_out_clk:
	clk_put(vpu_clk);
err_out_class:
	device_destroy(vpu_class, MKDEV(vpu_major, 0));
	class_destroy(vpu_class);
err_out_chrdev:
	unregister_chrdev(vpu_major, "mxc_vpu");
	return err;
}

static int vpu_dev_remove(struct platform_device *pdev)
{
	struct vpu_priv *vpu_data = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);

	/* pass the same dev_id that was used for request_irq() */
	free_irq(vpu_ipi_irq, vpu_data);
	if (vpu_data->soc_data->has_jpu)
		free_irq(vpu_jpu_irq, vpu_data);

	cancel_work_sync(&vpu_data->work);
	flush_workqueue(vpu_data->workqueue);
	destroy_workqueue(vpu_data->workqueue);

	if (iram.start)
		gen_pool_free(iram_pool, iram_base, iram.end - iram.start + 1);

	if (vpu_major > 0) {
		device_destroy(vpu_class, MKDEV(vpu_major, 0));
		class_destroy(vpu_class);
		unregister_chrdev(vpu_major, "mxc_vpu");
		vpu_major = 0;
	}

	vpu_free_dma_buffer(&bitwork_mem);
	vpu_free_dma_buffer(&pic_para_mem);
	vpu_free_dma_buffer(&user_data_mem);

	/* reset VPU state */
	vpu_power_up();
	vpu_clk_enable(vpu_data);
	vpu_reset();
	vpu_clk_disable(vpu_data);
	vpu_power_down();

	clk_put(vpu_clk);
	return 0;
}
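
/*
 * Suspend/resume: except on i.MX53, the VPU is powered down across
 * suspend, so the first 64 BIT-processor registers and the program
 * counter are saved here and restored on resume together with a
 * re-download of the 4 KiB boot code from the bitwork buffer.
 */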

#ifdef CONFIG_PM_SLEEP
static int vpu_suspend(struct device *dev)
{
	struct vpu_priv *vpu_data = dev_get_drvdata(dev);
	unsigned long timeout;

	mutex_lock(&vpu_data->lock);

	if (open_count) {
		/* Wait for the vpu to go idle; give up if it is still
		 * busy after about 1 sec
		 */
		timeout = jiffies + HZ;
		while (READ_REG(BIT_BUSY_FLAG)) {
			msleep(1);
			if (time_after(jiffies, timeout)) {
				mutex_unlock(&vpu_data->lock);
				return -EAGAIN;
			}
		}

		if (vpu_data->soc_data->is_mx53) {
			mutex_unlock(&vpu_data->lock);
			return 0;
		}

		if (bitwork_mem.cpu_addr != NULL) {
			int i;

			/* Save 64 registers from BIT_CODE_BUF_ADDR */
			for (i = 0; i < 64; i++)
				regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
			pc_before_suspend = READ_REG(BIT_CUR_PC);
		}

		vpu_clk_disable(vpu_data);
		/* If the VPU was working before suspend, disable the
		 * regulator to keep its use count balanced.
		 */
		vpu_power_down();
	}

	mutex_unlock(&vpu_data->lock);
	return 0;
}

static int vpu_resume(struct device *dev)
{
	int i;
	struct vpu_priv *vpu_data = dev_get_drvdata(dev);

	mutex_lock(&vpu_data->lock);

	if (open_count) {
		if (vpu_data->soc_data->is_mx53) {
			vpu_clk_enable(vpu_data);
			goto out;
		}

		/* If the VPU was working before suspend, enable the
		 * regulator again to keep its use count balanced.
		 */
		vpu_power_up();

		if (bitwork_mem.cpu_addr != NULL) {
			u32 *p = bitwork_mem.cpu_addr;
			u32 data, pc;
			u16 data_hi;
			u16 data_lo;

			vpu_clk_enable(vpu_data);

			pc = READ_REG(BIT_CUR_PC);
			if (pc) {
				dev_warn(vpu_dev, "Not powered off after suspend (PC=0x%x)\n", pc);
				goto out;
			}

			/* Restore registers */
			for (i = 0; i < 64; i++)
				WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));

			WRITE_REG(0x0, BIT_RESET_CTRL);
			WRITE_REG(0x0, BIT_CODE_RUN);

			/* The MX6 RTL does not initialize MBC_SET_SUBBLK_EN on reset */
			if (vpu_data->soc_data->quirk_subblk_en)
				WRITE_REG(0x0, MBC_SET_SUBBLK_EN);

			/*
			 * Re-load the boot code from the code buffer in
			 * external RAM. Thankfully, we only need 4096
			 * bytes, the same for all platforms.
			 */
			for (i = 0; i < 2048; i += 4) {
				data = p[(i / 2) + 1];
				data_hi = (data >> 16) & 0xFFFF;
				data_lo = data & 0xFFFF;
				WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
				WRITE_REG(((i + 1) << 16) | data_lo,
						BIT_CODE_DOWN);

				data = p[i / 2];
				data_hi = (data >> 16) & 0xFFFF;
				data_lo = data & 0xFFFF;
				WRITE_REG(((i + 2) << 16) | data_hi,
						BIT_CODE_DOWN);
				WRITE_REG(((i + 3) << 16) | data_lo,
						BIT_CODE_DOWN);
			}

			if (pc_before_suspend) {
				WRITE_REG(0x1, BIT_BUSY_FLAG);
				WRITE_REG(0x1, BIT_CODE_RUN);
				while (READ_REG(BIT_BUSY_FLAG))
					cpu_relax();
			} else {
				dev_warn(vpu_dev, "PC=0 before suspend\n");
			}
		}
	}
out:
	mutex_unlock(&vpu_data->lock);
	return 0;
}

static SIMPLE_DEV_PM_OPS(vpu_pm_ops, vpu_suspend, vpu_resume);
#define VPU_PM_OPS &vpu_pm_ops
#else
#define VPU_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */

/*! Driver definition
 *
 */
static struct platform_driver mxcvpu_driver = {
	.driver = {
		.name = "mxc_vpu",
		.of_match_table = vpu_of_match,
		.pm = VPU_PM_OPS,
	},
	.probe = vpu_dev_probe,
	.remove = vpu_dev_remove,
};

module_platform_driver(mxcvpu_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
MODULE_LICENSE("GPL");