]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/mxc/vpu/mxc_vpu.c
983cb2baf78c110574e7a60a920cf65afe9a07de
[karo-tx-linux.git] / drivers / mxc / vpu / mxc_vpu.c
1 /*
2  * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
3  */
4
5 /*
6  * The code contained herein is licensed under the GNU General Public
7  * License. You may obtain a copy of the GNU General Public License
8  * Version 2 or later at the following locations:
9  *
10  * http://www.opensource.org/licenses/gpl-license.html
11  * http://www.gnu.org/copyleft/gpl.html
12  */
13
14 /*!
15  * @file mxc_vpu.c
16  *
17  * @brief VPU system initialization and file operation implementation
18  *
19  * @ingroup VPU
20  */
21
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/stat.h>
27 #include <linux/platform_device.h>
28 #include <linux/kdev_t.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/wait.h>
31 #include <linux/list.h>
32 #include <linux/clk.h>
33 #include <linux/delay.h>
34 #include <linux/fsl_devices.h>
35 #include <linux/uaccess.h>
36 #include <linux/io.h>
37 #include <linux/slab.h>
38 #include <linux/workqueue.h>
39 #include <linux/sched.h>
40 #include <linux/vmalloc.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/page-flags.h>
43 #include <linux/mm_types.h>
44 #include <linux/types.h>
45 #include <linux/memblock.h>
46 #include <linux/memory.h>
47 #include <linux/version.h>
48 #include <linux/module.h>
49 #include <linux/pm_runtime.h>
50 #include <linux/sizes.h>
51 #include <linux/genalloc.h>
52 #include <linux/of.h>
53 #include <linux/of_device.h>
54 #include <linux/reset.h>
55 #include <linux/clk.h>
56 #include <linux/mxc_vpu.h>
57
58 /* Define one new pgprot which combined uncached and XN(never executable) */
59 #define pgprot_noncachedxn(prot) \
60         __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
61
/* Driver-wide state, shared by every opener of the device node. */
struct vpu_priv {
        struct fasync_struct *async_queue;      /* SIGIO subscribers (vpu_fasync) */
        struct work_struct work;                /* IRQ bottom half (vpu_worker_callback) */
        struct workqueue_struct *workqueue;     /* queue the bottom half runs on */
        struct mutex lock;                      /* guards open_count, mem_list and shared buffers */
        const struct mxc_vpu_soc_data *soc_data;
        int clk_enabled;                        /* clock reference count (vpu_clk_enable/disable) */
};
70
/* Per-open (per file descriptor) state, stored in filp->private_data. */
struct vpu_user_data {
        struct vpu_priv *vpu_data;      /* back pointer to the driver-wide state */
        int clk_enable_cnt;             /* clock refs taken via VPU_IOC_CLKGATE_SETTING */
};
75
/* To track the allocated memory buffer */
struct memalloc_record {
        struct list_head list;          /* linked into the global mem_list */
        struct vpu_mem_desc mem;        /* descriptor of the allocated DMA buffer */
};
81
/* Physical start/end of the on-chip IRAM slice reported to userspace
 * via VPU_IOC_IRAM_SETTING; both zero when no IRAM is configured. */
struct iram_setting {
        u32 start;
        u32 end;
};
86
/* Per-SoC feature/quirk flags, selected through the OF match table. */
struct mxc_vpu_soc_data {
        unsigned vpu_pwr_mgmnt:1,       /* SoC supports VPU power management */
                regulator_required:1,   /* the "pu" regulator must be present */
                quirk_subblk_en:1,
                is_mx51:1,
                is_mx53:1,
                is_mx6dl:1,
                is_mx6q:1,
                has_jpu:1;              /* JPEG unit present, with its own IRQ */
};
97
/* IRAM pool handed to the VPU at probe time (optional, from DT "iram") */
static struct gen_pool *iram_pool;
static u32 iram_base;

/* All vpu_mem_desc buffers handed out via VPU_IOC_PHYMEM_ALLOC;
 * protected by vpu_priv.lock */
static LIST_HEAD(mem_list);

static int vpu_major;
static struct class *vpu_class;
static struct vpu_priv *vpu_data;
static u8 open_count;           /* number of concurrent openers; guarded by vpu_priv.lock */
static struct clk *vpu_clk;
static struct vpu_mem_desc bitwork_mem;
static struct vpu_mem_desc pic_para_mem;
static struct vpu_mem_desc user_data_mem;
static struct vpu_mem_desc share_mem;   /* DMA-coherent shared buffer */
static struct vpu_mem_desc vshare_mem;  /* vmalloc'ed shared buffer */

static void __iomem *vpu_base;          /* ioremapped register window */
static int vpu_ipi_irq;
static u32 phy_vpu_base_addr;           /* physical base of the register window */

static struct device *vpu_dev;

/* IRAM setting */
static struct iram_setting iram;

/* implement the blocking ioctl */
static int irq_status;                  /* set by the worker, consumed by WAIT4INT */
static int codec_done;
static wait_queue_head_t vpu_queue;

static int vpu_jpu_irq;

#ifdef CONFIG_PM_SLEEP
static unsigned int regBk[64];          /* register backup across suspend */
static unsigned int pc_before_suspend;
#endif
static struct regulator *vpu_regulator;

/* relaxed accessors into the VPU register window */
#define READ_REG(x)             readl_relaxed(vpu_base + (x))
#define WRITE_REG(val, x)       writel_relaxed(val, vpu_base + (x))
138
139 static int vpu_clk_enable(struct vpu_priv *vpu_data)
140 {
141         int ret = 0;
142
143         if (vpu_data->clk_enabled++ == 0)
144                 ret = clk_prepare_enable(vpu_clk);
145
146         if (WARN_ON(vpu_data->clk_enabled <= 0))
147                 return -EINVAL;
148
149         return ret;
150 }
151
152 static int vpu_clk_disable(struct vpu_priv *vpu_data)
153 {
154         if (WARN_ON(vpu_data->clk_enabled == 0))
155                 return -EINVAL;
156
157         if (--vpu_data->clk_enabled == 0)
158                 clk_disable_unprepare(vpu_clk);
159         return 0;
160 }
161
/* Pulse the VPU reset line through the reset controller bound to the device. */
static inline int vpu_reset(void)
{
        return device_reset(vpu_dev);
}
166
167 static void vpu_power_up(void)
168 {
169         int ret;
170
171         if (IS_ERR(vpu_regulator))
172                 return;
173
174         ret = regulator_enable(vpu_regulator);
175         if (ret)
176                 dev_err(vpu_dev, "failed to power up vpu: %d\n", ret);
177 }
178
179 static void vpu_power_down(void)
180 {
181         int ret;
182
183         if (IS_ERR(vpu_regulator))
184                 return;
185
186         ret = regulator_disable(vpu_regulator);
187         if (ret)
188                 dev_err(vpu_dev, "failed to power down vpu: %d\n", ret);
189 }
190
191 /*!
192  * Private function to alloc dma buffer
193  * @return status  0 success.
194  */
195 static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
196 {
197         mem->cpu_addr = dma_alloc_coherent(vpu_dev, PAGE_ALIGN(mem->size),
198                                         &mem->phy_addr,
199                                         GFP_DMA | GFP_KERNEL);
200         dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = %p\n", mem->cpu_addr);
201         if (mem->cpu_addr == NULL) {
202                 dev_err(vpu_dev, "Physical memory allocation error!\n");
203                 return -ENOMEM;
204         }
205         return 0;
206 }
207
208 /*!
209  * Private function to free dma buffer
210  */
211 static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
212 {
213         if (mem->cpu_addr != NULL)
214                 dma_free_coherent(vpu_dev, PAGE_ALIGN(mem->size),
215                                 mem->cpu_addr, mem->phy_addr);
216 }
217
218 /*!
219  * Private function to free buffers
220  * @return status  0 success.
221  */
222 static int vpu_free_buffers(void)
223 {
224         struct memalloc_record *rec, *n;
225         struct vpu_mem_desc mem;
226
227         list_for_each_entry_safe(rec, n, &mem_list, list) {
228                 mem = rec->mem;
229                 if (mem.cpu_addr != 0) {
230                         vpu_free_dma_buffer(&mem);
231                         dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
232                         /* delete from list */
233                         list_del(&rec->list);
234                         kfree(rec);
235                 }
236         }
237
238         return 0;
239 }
240
/* Bottom half for both VPU IRQs: notify SIGIO subscribers, publish the
 * interrupt to WAIT4INT sleepers, and clear the codec-done marker set by
 * the hard IRQ handlers. */
static inline void vpu_worker_callback(struct work_struct *w)
{
        struct vpu_priv *dev = container_of(w, struct vpu_priv, work);

        /* deliver SIGIO/POLL_IN to fasync subscribers, if any */
        if (dev->async_queue)
                kill_fasync(&dev->async_queue, SIGIO, POLL_IN);

        irq_status = 1;
        /*
         * Clock is gated on when dec/enc started, gate it off when
         * codec is done.
         */
        if (codec_done)
                codec_done = 0;

        /* wake anyone blocked in VPU_IOC_WAIT4INT */
        wake_up_interruptible(&vpu_queue);
}
258
/*!
 * @brief vpu interrupt handler
 *
 * Reads the interrupt reason, marks codec completion when bit 3 is set
 * (consumed by vpu_worker_callback), acks the interrupt in the VPU and
 * defers the rest of the handling to the workqueue.
 */
static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
{
        struct vpu_priv *dev = dev_id;
        unsigned long reg;

        reg = READ_REG(BIT_INT_REASON);
        if (reg & 0x8)          /* bit 3: treated as codec-done by this driver */
                codec_done = 1;
        WRITE_REG(0x1, BIT_INT_CLEAR);  /* ack before scheduling bottom half */

        queue_work(dev->workqueue, &dev->work);

        return IRQ_HANDLED;
}
276
/*!
 * @brief vpu jpu interrupt handler
 *
 * JPEG-unit variant of vpu_ipi_irq_handler: bits 0-1 of the MJPEG picture
 * status register are treated as codec completion.  No explicit ack write
 * is done here (unlike the IPI handler).
 */
static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
{
        struct vpu_priv *dev = dev_id;
        unsigned long reg;

        reg = READ_REG(MJPEG_PIC_STATUS_REG);
        if (reg & 0x3)
                codec_done = 1;

        queue_work(dev->workqueue, &dev->work);

        return IRQ_HANDLED;
}
293
294 /*!
295  * @brief open function for vpu file operation
296  *
297  * @return  0 on success or negative error code on error
298  */
299 static int vpu_open(struct inode *inode, struct file *filp)
300 {
301         struct vpu_user_data *user_data = devm_kzalloc(vpu_dev,
302                                                 sizeof(*user_data),
303                                                 GFP_KERNEL);
304         if (user_data == NULL)
305                 return -ENOMEM;
306
307         user_data->vpu_data = vpu_data;
308
309         mutex_lock(&vpu_data->lock);
310
311         if (open_count++ == 0) {
312                 pm_runtime_get_sync(vpu_dev);
313                 vpu_power_up();
314         }
315
316         filp->private_data = user_data;
317         mutex_unlock(&vpu_data->lock);
318         return 0;
319 }
320
321 /*!
322  * @brief IO ctrl function for vpu file operation
323  * @param cmd IO ctrl command
324  * @return  0 on success or negative error code on error
325  */
326 static long vpu_ioctl(struct file *filp, u_int cmd,
327                      u_long arg)
328 {
329         int ret = -EINVAL;
330         struct vpu_user_data *user_data = filp->private_data;
331         struct vpu_priv *vpu_data = user_data->vpu_data;
332
333         switch (cmd) {
334         case VPU_IOC_PHYMEM_ALLOC:
335         {
336                 struct memalloc_record *rec;
337
338                 rec = kzalloc(sizeof(*rec), GFP_KERNEL);
339                 if (!rec)
340                         return -ENOMEM;
341
342                 if (copy_from_user(&rec->mem,
343                                         (struct vpu_mem_desc *)arg,
344                                         sizeof(struct vpu_mem_desc))) {
345                         kfree(rec);
346                         return -EFAULT;
347                 }
348
349                 dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
350                         rec->mem.size);
351
352                 ret = vpu_alloc_dma_buffer(&rec->mem);
353                 if (ret) {
354                         kfree(rec);
355                         return ret;
356                 }
357                 if (copy_to_user((void __user *)arg, &rec->mem,
358                                         sizeof(struct vpu_mem_desc))) {
359                         kfree(rec);
360                         return -EFAULT;
361                 }
362
363                 mutex_lock(&vpu_data->lock);
364                 list_add(&rec->list, &mem_list);
365                 mutex_unlock(&vpu_data->lock);
366
367                 break;
368         }
369         case VPU_IOC_PHYMEM_FREE:
370         {
371                 struct memalloc_record *rec, *n;
372                 struct vpu_mem_desc vpu_mem;
373
374                 if (copy_from_user(&vpu_mem,
375                                         (struct vpu_mem_desc *)arg,
376                                         sizeof(struct vpu_mem_desc)))
377                         return -EFAULT;
378
379                 dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = %p\n",
380                         vpu_mem.cpu_addr);
381                 if (vpu_mem.cpu_addr != NULL)
382                         vpu_free_dma_buffer(&vpu_mem);
383
384                 mutex_lock(&vpu_data->lock);
385                 list_for_each_entry_safe(rec, n, &mem_list, list) {
386                         if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
387                                 list_del(&rec->list);
388                                 break;
389                         }
390                 }
391                 kfree(rec);
392                 mutex_unlock(&vpu_data->lock);
393
394                 break;
395         }
396         case VPU_IOC_WAIT4INT:
397         {
398                 u_long timeout = arg;
399
400                 ret = wait_event_interruptible_timeout(vpu_queue,
401                                                 irq_status != 0,
402                                                 msecs_to_jiffies(timeout));
403                 if (ret == 0) {
404                         dev_warn(vpu_dev, "VPU blocking: timeout.\n");
405                         ret = -ETIMEDOUT;
406                 } else if (signal_pending(current)) {
407                         dev_warn(vpu_dev, "VPU interrupt received.\n");
408                         ret = -ERESTARTSYS;
409                 } else {
410                         irq_status = 0;
411                 }
412                 break;
413         }
414         case VPU_IOC_IRAM_SETTING:
415                 ret = copy_to_user((void __user *)arg, &iram,
416                                 sizeof(struct iram_setting));
417                 if (ret)
418                         ret = -EFAULT;
419
420                 break;
421         case VPU_IOC_CLKGATE_SETTING:
422         {
423                 u32 clkgate_en;
424
425                 if (get_user(clkgate_en, (u32 __user *)arg))
426                         return -EFAULT;
427
428                 mutex_lock(&vpu_data->lock);
429                 if (clkgate_en) {
430                         ret = vpu_clk_enable(vpu_data);
431                         if (ret == 0)
432                                 user_data->clk_enable_cnt++;
433                 } else {
434                         if (user_data->clk_enable_cnt == 0) {
435                                 ret = -EINVAL;
436                         } else {
437                                 if (--user_data->clk_enable_cnt == 0)
438                                         vpu_clk_disable(vpu_data);
439                                 ret = 0;
440                         }
441                 }
442                 mutex_unlock(&vpu_data->lock);
443                 break;
444         }
445         case VPU_IOC_GET_SHARE_MEM:
446                 mutex_lock(&vpu_data->lock);
447                 if (share_mem.cpu_addr == NULL) {
448                         if (copy_from_user(&share_mem,
449                                                 (struct vpu_mem_desc *)arg,
450                                                 sizeof(struct vpu_mem_desc))) {
451                                 mutex_unlock(&vpu_data->lock);
452                                 return -EFAULT;
453                         }
454                         ret = vpu_alloc_dma_buffer(&share_mem);
455                         if (ret) {
456                                 mutex_unlock(&vpu_data->lock);
457                                 return ret;
458                         }
459                 }
460                 if (copy_to_user((void __user *)arg,
461                                         &share_mem,
462                                         sizeof(struct vpu_mem_desc)))
463                         ret = -EFAULT;
464                 else
465                         ret = 0;
466                 mutex_unlock(&vpu_data->lock);
467                 break;
468         case VPU_IOC_REQ_VSHARE_MEM:
469                 mutex_lock(&vpu_data->lock);
470                 if (vshare_mem.cpu_addr == NULL) {
471                         if (copy_from_user(&vshare_mem,
472                                                 (struct vpu_mem_desc *)arg,
473                                                 sizeof(struct
474                                                         vpu_mem_desc))) {
475                                 mutex_unlock(&vpu_data->lock);
476                                 return -EFAULT;
477                         }
478                         vshare_mem.cpu_addr = vmalloc_user(vshare_mem.size);
479                         if (vshare_mem.cpu_addr == NULL) {
480                                 mutex_unlock(&vpu_data->lock);
481                                 return -ENOMEM;
482                         }
483                 }
484                 if (copy_to_user((void __user *)arg, &vshare_mem,
485                                         sizeof(struct vpu_mem_desc)))
486                         ret = -EFAULT;
487                 else
488                         ret = 0;
489                 mutex_unlock(&vpu_data->lock);
490                 break;
491         case VPU_IOC_GET_WORK_ADDR:
492                 if (bitwork_mem.cpu_addr == 0) {
493                         if (copy_from_user(&bitwork_mem,
494                                                 (struct vpu_mem_desc *)arg,
495                                                 sizeof(struct vpu_mem_desc)))
496                                 return -EFAULT;
497
498                         ret = vpu_alloc_dma_buffer(&bitwork_mem);
499                         if (ret)
500                                 return ret;
501                 }
502                 if (copy_to_user((void __user *)arg,
503                                         &bitwork_mem,
504                                         sizeof(struct
505                                                 vpu_mem_desc)))
506                         ret = -EFAULT;
507                 else
508                         ret = 0;
509                 break;
510         /*
511          * The following two ioctls are used when user allocates a working buffer
512          * and registers it to vpu driver.
513          */
514         case VPU_IOC_QUERY_BITWORK_MEM:
515                 if (copy_to_user((void __user *)arg,
516                                         &bitwork_mem,
517                                         sizeof(struct vpu_mem_desc)))
518                         ret = -EFAULT;
519                 else
520                         ret = 0;
521                 break;
522         case VPU_IOC_SET_BITWORK_MEM:
523                 if (copy_from_user(&bitwork_mem,
524                                         (struct vpu_mem_desc *)arg,
525                                         sizeof(struct vpu_mem_desc)))
526                         ret = -EFAULT;
527                 else
528                         ret = 0;
529                 break;
530         case VPU_IOC_SYS_SW_RESET:
531                 ret = vpu_reset();
532                 break;
533         case VPU_IOC_REG_DUMP:
534         case VPU_IOC_PHYMEM_DUMP:
535                 ret = 0;
536                 break;
537         case VPU_IOC_PHYMEM_CHECK:
538         {
539                 struct vpu_mem_desc check_memory;
540
541                 ret = copy_from_user(&check_memory,
542                                 (void __user *)arg,
543                                 sizeof(struct vpu_mem_desc));
544                 if (ret != 0) {
545                         dev_err(vpu_dev, "copy from user failure:%d\n", ret);
546                         ret = -EFAULT;
547                         break;
548                 }
549                 check_memory.size = 1;
550                 if (copy_to_user((void __user *)arg, &check_memory,
551                                         sizeof(struct vpu_mem_desc)))
552                         ret = -EFAULT;
553                 else
554                         ret = 0;
555                 break;
556         }
557         case VPU_IOC_LOCK_DEV:
558         {
559                 u32 lock_en;
560
561                 if (get_user(lock_en, (u32 __user *)arg))
562                         return -EFAULT;
563
564                 if (lock_en)
565                         mutex_lock(&vpu_data->lock);
566                 else
567                         mutex_unlock(&vpu_data->lock);
568                 ret = 0;
569                 break;
570         }
571         default:
572                 dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
573         }
574         return ret;
575 }
576
577 /*!
578  * @brief Release function for vpu file operation
579  * @return  0 on success or negative error code on error
580  */
581 static int vpu_release(struct inode *inode, struct file *filp)
582 {
583         unsigned long timeout;
584         struct vpu_user_data *user_data = filp->private_data;
585         struct vpu_priv *vpu_data = user_data->vpu_data;
586
587         mutex_lock(&vpu_data->lock);
588
589         if (open_count > 0 && !--open_count) {
590                 /* Wait for vpu go to idle state */
591                 vpu_clk_enable(vpu_data);
592                 if (READ_REG(BIT_CUR_PC)) {
593
594                         timeout = jiffies + HZ;
595                         while (READ_REG(BIT_BUSY_FLAG)) {
596                                 msleep(1);
597                                 if (time_after(jiffies, timeout)) {
598                                         dev_warn(vpu_dev, "VPU timeout during release\n");
599                                         break;
600                                 }
601                         }
602
603                         /* Clean up interrupt */
604                         cancel_work_sync(&vpu_data->work);
605                         flush_workqueue(vpu_data->workqueue);
606                         irq_status = 0;
607
608                         if (READ_REG(BIT_BUSY_FLAG)) {
609                                 if (vpu_data->soc_data->is_mx51 ||
610                                         vpu_data->soc_data->is_mx53) {
611                                         dev_err(vpu_dev,
612                                                 "fatal error: can't gate/power off when VPU is busy\n");
613                                         vpu_clk_disable(vpu_data);
614                                         mutex_unlock(&vpu_data->lock);
615                                         return -EBUSY;
616                                 }
617                                 if (vpu_data->soc_data->is_mx6dl ||
618                                         vpu_data->soc_data->is_mx6q) {
619                                         WRITE_REG(0x11, 0x10F0);
620                                         timeout = jiffies + HZ;
621                                         while (READ_REG(0x10F4) != 0x77) {
622                                                 msleep(1);
623                                                 if (time_after(jiffies, timeout))
624                                                         break;
625                                         }
626
627                                         if (READ_REG(0x10F4) != 0x77) {
628                                                 dev_err(vpu_dev,
629                                                         "fatal error: can't gate/power off when VPU is busy\n");
630                                                 WRITE_REG(0x0, 0x10F0);
631                                                 vpu_clk_disable(vpu_data);
632                                                 mutex_unlock(&vpu_data->lock);
633                                                 return -EBUSY;
634                                         }
635                                         vpu_reset();
636                                 }
637                         }
638                 }
639
640                 vpu_free_buffers();
641
642                 /* Free shared memory when vpu device is idle */
643                 vpu_free_dma_buffer(&share_mem);
644                 share_mem.cpu_addr = 0;
645                 vfree(vshare_mem.cpu_addr);
646                 vshare_mem.cpu_addr = 0;
647
648                 if (user_data->clk_enable_cnt)
649                         vpu_clk_disable(vpu_data);
650
651                 vpu_clk_disable(vpu_data);
652                 vpu_power_down();
653                 pm_runtime_put_sync_suspend(vpu_dev);
654                 devm_kfree(vpu_dev, user_data);
655         }
656         mutex_unlock(&vpu_data->lock);
657
658         return 0;
659 }
660
661 /*!
662  * @brief fasync function for vpu file operation
663  * @return  0 on success or negative error code on error
664  */
665 static int vpu_fasync(int fd, struct file *filp, int mode)
666 {
667         struct vpu_user_data *user_data = filp->private_data;
668         struct vpu_priv *vpu_data = user_data->vpu_data;
669         return fasync_helper(fd, filp, mode, &vpu_data->async_queue);
670 }
671
/*!
 * @brief memory map function of hardware registers for vpu file operation
 * @return  0 on success or negative error code on error (-EAGAIN when the
 *          PFN range could not be inserted)
 */
static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
{
        unsigned long pfn;

        vm->vm_flags |= VM_IO;
        /*
         * Since vpu registers have been mapped with ioremap() at probe
         * which L_PTE_XN is 1, and the same physical address must be
         * mapped multiple times with same type, so set L_PTE_XN to 1 here.
         * Otherwise, there may be unexpected result in video codec.
         */
        vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
        pfn = phy_vpu_base_addr >> PAGE_SHIFT;
        dev_dbg(vpu_dev, "size=0x%lx, page no.=0x%lx\n",
                 vm->vm_end - vm->vm_start, pfn);
        return remap_pfn_range(vm, vm->vm_start, pfn,
                        vm->vm_end - vm->vm_start,
                        vm->vm_page_prot) ? -EAGAIN : 0;
}
695
696 /*!
697  * @brief memory map function of memory for vpu file operation
698  * @return  0 on success or negative error code on error
699  */
700 static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
701 {
702         size_t request_size = vm->vm_end - vm->vm_start;
703
704         dev_dbg(vpu_dev, "start=0x%08lx, pgoff=0x%08lx, size=%zx\n",
705                 vm->vm_start, vm->vm_pgoff, request_size);
706
707         vm->vm_flags |= VM_IO;
708         vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
709
710         return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
711                                request_size, vm->vm_page_prot) ? -EAGAIN : 0;
712 }
713
/* !
 * @brief memory map function of vmalloced share memory
 * @return  0 on success or negative error code on error
 */
static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
{
        int ret;

        /* vm_pgoff carries the kernel virtual address of the vmalloc'ed
         * share buffer shifted right by PAGE_SHIFT (see vpu_mmap) */
        ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
        vm->vm_flags |= VM_IO;
        return ret;
}
726 /*!
727  * @brief memory map interface for vpu file operation
728  * @return  0 on success or negative error code on error
729  */
730 static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
731 {
732         unsigned long offset;
733
734         offset = (unsigned long)vshare_mem.cpu_addr >> PAGE_SHIFT;
735
736         if (vm->vm_pgoff && (vm->vm_pgoff == offset))
737                 return vpu_map_vshare_mem(fp, vm);
738         else if (vm->vm_pgoff)
739                 return vpu_map_dma_mem(fp, vm);
740         else
741                 return vpu_map_hwregs(fp, vm);
742 }
743
/* character-device entry points for the mxc_vpu device node */
static const struct file_operations vpu_fops = {
        .owner = THIS_MODULE,
        .open = vpu_open,
        .unlocked_ioctl = vpu_ioctl,
        .release = vpu_release,
        .fasync = vpu_fasync,
        .mmap = vpu_mmap,
};
752
static const struct mxc_vpu_soc_data imx6dl_vpu_data = {
        .regulator_required = 1,
        .vpu_pwr_mgmnt = 1,
        .has_jpu = 1,
};

static const struct mxc_vpu_soc_data imx6q_vpu_data = {
        .quirk_subblk_en = 1,
        .regulator_required = 1,
        .vpu_pwr_mgmnt = 1,
        .has_jpu = 1,
};

/* i.MX53: no power management, no JPU, regulator optional */
static const struct mxc_vpu_soc_data imx53_vpu_data = {
};

static const struct mxc_vpu_soc_data imx51_vpu_data = {
        .vpu_pwr_mgmnt = 1,
};

/* device-tree match table; .data selects the per-SoC quirk set above */
static const struct of_device_id vpu_of_match[] = {
        { .compatible = "fsl,imx6dl-vpu", .data = &imx6dl_vpu_data, },
        { .compatible = "fsl,imx6q-vpu", .data = &imx6q_vpu_data, },
        { .compatible = "fsl,imx53-vpu", .data = &imx53_vpu_data, },
        { .compatible = "fsl,imx51-vpu", .data = &imx51_vpu_data, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, vpu_of_match);
781
782 /*!
783  * This function is called by the driver framework to initialize the vpu device.
784  * @param   dev The device structure for the vpu passed in by the framework.
785  * @return   0 on success or negative error code on error
786  */
787 static int vpu_dev_probe(struct platform_device *pdev)
788 {
789         int err = 0;
790         struct device *temp_class;
791         struct resource *res;
792         unsigned long addr = 0;
793         struct device_node *np = pdev->dev.of_node;
794         u32 iramsize;
795         struct vpu_priv *drv_data;
796         const struct of_device_id *of_id = of_match_device(vpu_of_match,
797                                                         &pdev->dev);
798         const struct mxc_vpu_soc_data *soc_data = of_id->data;
799
800         drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
801         if (drv_data == NULL)
802                 return -ENOMEM;
803
804         drv_data->soc_data = soc_data;
805         mutex_init(&drv_data->lock);
806
807         init_waitqueue_head(&vpu_queue);
808         drv_data->workqueue = create_workqueue("vpu_wq");
809         INIT_WORK(&drv_data->work, vpu_worker_callback);
810
811         err = of_property_read_u32(np, "iramsize", &iramsize);
812         if (!err && iramsize) {
813                 iram_pool = of_get_named_gen_pool(np, "iram", 0);
814                 if (!iram_pool) {
815                         dev_err(&pdev->dev, "iram pool not available\n");
816                         return -ENOMEM;
817                 }
818
819                 iram_base = gen_pool_alloc(iram_pool, iramsize);
820                 if (!iram_base) {
821                         dev_err(&pdev->dev, "unable to alloc iram\n");
822                         return -ENOMEM;
823                 }
824
825                 addr = gen_pool_virt_to_phys(iram_pool, iram_base);
826         }
827
828         if (addr == 0)
829                 iram.start = iram.end = 0;
830         else {
831                 iram.start = addr;
832                 iram.end = addr + iramsize - 1;
833         }
834
835         vpu_dev = &pdev->dev;
836
837         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
838         if (!res) {
839                 dev_err(vpu_dev, "vpu: unable to get vpu base addr\n");
840                 return -ENODEV;
841         }
842         phy_vpu_base_addr = res->start;
843         vpu_base = devm_ioremap_resource(&pdev->dev, res);
844         if (IS_ERR(vpu_base))
845                 return PTR_ERR(vpu_base);
846
847         vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
848         if (vpu_major < 0) {
849                 dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
850                 return vpu_major;
851         }
852
853         vpu_class = class_create(THIS_MODULE, "mxc_vpu");
854         if (IS_ERR(vpu_class)) {
855                 err = PTR_ERR(vpu_class);
856                 goto err_out_chrdev;
857         }
858
859         temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
860                                    NULL, "mxc_vpu");
861         if (IS_ERR(temp_class)) {
862                 err = PTR_ERR(temp_class);
863                 goto err_out_class;
864         }
865
866         vpu_clk = clk_get(&pdev->dev, "vpu_clk");
867         if (IS_ERR(vpu_clk)) {
868                 err = PTR_ERR(vpu_clk);
869                 goto err_out_class;
870         }
871
872         vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
873         if (vpu_ipi_irq < 0) {
874                 dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
875                 err = vpu_ipi_irq;
876                 goto err_out_class;
877         }
878         err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
879                           drv_data);
880         if (err)
881                 goto err_out_class;
882
883         vpu_regulator = devm_regulator_get(vpu_dev, "pu");
884         if (IS_ERR(vpu_regulator)) {
885                 if (drv_data->soc_data->regulator_required) {
886                         dev_err(vpu_dev, "failed to get vpu power\n");
887                         goto err_out_class;
888                 } else {
889                         /* regulator_get will return error on MX5x,
890                          * just igore it everywhere
891                          */
892                         dev_warn(vpu_dev, "failed to get vpu power\n");
893                 }
894         }
895
896         platform_set_drvdata(pdev, drv_data);
897
898         if (drv_data->soc_data->has_jpu) {
899                 vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
900                 if (vpu_jpu_irq < 0) {
901                         dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
902                         err = vpu_jpu_irq;
903                         goto err_out_class;
904                 }
905                 err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
906                                 "VPU_JPG_IRQ", drv_data);
907                 if (err)
908                         goto err_out_class;
909         }
910
911         pm_runtime_enable(&pdev->dev);
912         vpu_data = drv_data;
913
914         dev_info(vpu_dev, "VPU initialized\n");
915         return 0;
916
917 err_out_class:
918         device_destroy(vpu_class, MKDEV(vpu_major, 0));
919         class_destroy(vpu_class);
920 err_out_chrdev:
921         unregister_chrdev(vpu_major, "mxc_vpu");
922         return err;
923 }
924
925 static int vpu_dev_remove(struct platform_device *pdev)
926 {
927         struct vpu_priv *vpu_data = platform_get_drvdata(pdev);
928
929         pm_runtime_disable(&pdev->dev);
930
931         free_irq(vpu_ipi_irq, &vpu_data);
932 #ifdef MXC_VPU_HAS_JPU
933         free_irq(vpu_jpu_irq, &vpu_data);
934 #endif
935         cancel_work_sync(&vpu_data->work);
936         flush_workqueue(vpu_data->workqueue);
937         destroy_workqueue(vpu_data->workqueue);
938
939         if (iram.start)
940                 gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);
941
942         if (vpu_major > 0) {
943                 device_destroy(vpu_class, MKDEV(vpu_major, 0));
944                 class_destroy(vpu_class);
945                 unregister_chrdev(vpu_major, "mxc_vpu");
946                 vpu_major = 0;
947         }
948
949         vpu_free_dma_buffer(&bitwork_mem);
950         vpu_free_dma_buffer(&pic_para_mem);
951         vpu_free_dma_buffer(&user_data_mem);
952
953         /* reset VPU state */
954         vpu_power_up();
955         vpu_clk_enable(vpu_data);
956         vpu_reset();
957         vpu_clk_disable(vpu_data);
958         vpu_power_down();
959
960         clk_put(vpu_clk);
961         return 0;
962 }
963
964 #ifdef CONFIG_PM_SLEEP
965 static int vpu_suspend(struct device *dev)
966 {
967         struct vpu_priv *vpu_data = dev_get_drvdata(dev);
968         unsigned long timeout;
969
970         mutex_lock(&vpu_data->lock);
971
972         if (open_count) {
973                 /* Wait for vpu go to idle state, suspect vpu cannot be changed
974                  * to idle state after about 1 sec
975                  */
976                 timeout = jiffies + HZ;
977                 while (READ_REG(BIT_BUSY_FLAG)) {
978                         msleep(1);
979                         if (time_after(jiffies, timeout)) {
980                                 mutex_unlock(&vpu_data->lock);
981                                 return -EAGAIN;
982                         }
983                 }
984
985                 if (vpu_data->soc_data->is_mx53) {
986                         mutex_unlock(&vpu_data->lock);
987                         return 0;
988                 }
989
990                 if (bitwork_mem.cpu_addr != 0) {
991                         int i;
992
993                         /* Save 64 registers from BIT_CODE_BUF_ADDR */
994                         for (i = 0; i < 64; i++)
995                                 regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
996                         pc_before_suspend = READ_REG(BIT_CUR_PC);
997                 }
998
999                 vpu_clk_disable(vpu_data);
1000                 /* If VPU is working before suspend, disable
1001                  * regulator to make usecount right.
1002                  */
1003                 vpu_power_down();
1004         }
1005
1006         mutex_unlock(&vpu_data->lock);
1007         return 0;
1008 }
1009
/*
 * System-sleep resume callback: undo vpu_suspend().
 *
 * On MX53 only the clock needs re-enabling.  On other SoCs power is
 * restored and, if a bitwork buffer exists, the 64 saved BIT registers
 * are written back and the 4 KiB boot code is re-downloaded from the
 * code buffer in external RAM before restarting the BIT processor.
 */
static int vpu_resume(struct device *dev)
{
	int i;
	struct vpu_priv *vpu_data = dev_get_drvdata(dev);

	mutex_lock(&vpu_data->lock);

	if (open_count) {
		/* MX53 retained context across suspend; clock back on and done */
		if (vpu_data->soc_data->is_mx53) {
			vpu_clk_enable(vpu_data);
			goto out;
		}

		/* If VPU is working before suspend, enable
		 * regulator to make usecount right.
		 */
		vpu_power_up();

		if (bitwork_mem.cpu_addr != NULL) {
			u32 *p = bitwork_mem.cpu_addr;
			u32 data, pc;
			u16 data_hi;
			u16 data_lo;

			vpu_clk_enable(vpu_data);

			/* A non-zero PC means the VPU kept state (it was not
			 * actually powered off) — skip the restore entirely.
			 */
			pc = READ_REG(BIT_CUR_PC);
			if (pc) {
				dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);
				goto out;
			}

			/* Restore registers */
			for (i = 0; i < 64; i++)
				WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));

			WRITE_REG(0x0, BIT_RESET_CTRL);
			WRITE_REG(0x0, BIT_CODE_RUN);
			/* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
			if (vpu_data->soc_data->quirk_subblk_en)
				WRITE_REG(0x0, MBC_SET_SUBBLK_EN);

			/*
			 * Re-load boot code, from the codebuffer in external RAM.
			 * Thankfully, we only need 4096 bytes, same for all platforms.
			 *
			 * Each BIT_CODE_DOWN write carries a 16-bit halfword
			 * address in the upper half and a 16-bit data halfword
			 * in the lower half; presumably the swapped word order
			 * (p[i/2 + 1] before p[i/2]) matches the VPU's expected
			 * endianness — TODO confirm against the VPU firmware spec.
			 */
			for (i = 0; i < 2048; i += 4) {
				data = p[(i / 2) + 1];
				data_hi = (data >> 16) & 0xFFFF;
				data_lo = data & 0xFFFF;
				WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
				WRITE_REG(((i + 1) << 16) | data_lo,
						BIT_CODE_DOWN);

				data = p[i / 2];
				data_hi = (data >> 16) & 0xFFFF;
				data_lo = data & 0xFFFF;
				WRITE_REG(((i + 2) << 16) | data_hi,
						BIT_CODE_DOWN);
				WRITE_REG(((i + 3) << 16) | data_lo,
						BIT_CODE_DOWN);
			}

			if (pc_before_suspend) {
				/* Restart the BIT processor and wait for it to
				 * come up.
				 * NOTE(review): this busy-wait has no timeout —
				 * a VPU that never clears BIT_BUSY_FLAG hangs
				 * resume here; consider adding a bounded wait.
				 */
				WRITE_REG(0x1, BIT_BUSY_FLAG);
				WRITE_REG(0x1, BIT_CODE_RUN);
				while (READ_REG(BIT_BUSY_FLAG))
					;
			} else {
				dev_warn(vpu_dev, "PC=0 before suspend\n");
			}
		}
	}
out:
	mutex_unlock(&vpu_data->lock);
	return 0;
}
1087
/* Expose suspend/resume via dev_pm_ops only when the kernel is built
 * with CONFIG_PM_SLEEP; otherwise the driver registers no PM callbacks.
 */
static SIMPLE_DEV_PM_OPS(vpu_pm_ops, vpu_suspend, vpu_resume);
#define VPU_PM_OPS &vpu_pm_ops
#else
#define VPU_PM_OPS NULL
#endif /* !CONFIG_PM_SLEEP */
1093
/*!
 * Platform driver definition: matches devices through vpu_of_match
 * (or the legacy platform name "mxc_vpu") and wires in the optional
 * power-management callbacks selected above.
 */
static struct platform_driver mxcvpu_driver = {
	.driver = {
		.name = "mxc_vpu",
		.of_match_table = vpu_of_match,
		.pm = VPU_PM_OPS,
	},
	.probe = vpu_dev_probe,
	.remove = vpu_dev_remove,
};
1106
/* Standard helper: generates module init/exit that register and
 * unregister the platform driver.
 */
module_platform_driver(mxcvpu_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
MODULE_LICENSE("GPL");