/*
 * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
 */

/*
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

/*!
 * @file mxc_vpu.c
 *
 * @brief VPU system initialization and file operation implementation
 *
 * @ingroup VPU
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/stat.h>
#include <linux/platform_device.h>
#include <linux/kdev_t.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fsl_devices.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>
#include <linux/page-flags.h>
#include <linux/mm_types.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/mxc_vpu.h>

/* Define a pgprot value that is both uncached and XN (never executable) */
#define pgprot_noncachedxn(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)

struct vpu_priv {
        struct fasync_struct *async_queue;
        struct work_struct work;
        struct workqueue_struct *workqueue;
        struct mutex lock;
        const struct mxc_vpu_soc_data *soc_data;
        int clk_enabled;
        struct list_head users;
};

struct vpu_user_data {
        struct vpu_priv *vpu_data;
        struct list_head list;
        int clk_enable_cnt;
};

/* To track the allocated memory buffer */
struct memalloc_record {
        struct list_head list;
        struct vpu_mem_desc mem;
};

struct iram_setting {
        u32 start;
        u32 end;
};

struct mxc_vpu_soc_data {
        unsigned vpu_pwr_mgmnt:1,
                regulator_required:1,
                quirk_subblk_en:1,
                is_mx51:1,
                is_mx53:1,
                is_mx6dl:1,
                is_mx6q:1,
                has_jpu:1;
};

static struct gen_pool *iram_pool;
static u32 iram_base;

static LIST_HEAD(mem_list);

static int vpu_major;
static struct class *vpu_class;
static struct vpu_priv vpu_data;
static u8 open_count;
static struct clk *vpu_clk;
static struct vpu_mem_desc bitwork_mem;
static struct vpu_mem_desc pic_para_mem;
static struct vpu_mem_desc user_data_mem;
static struct vpu_mem_desc share_mem;
static struct vpu_mem_desc vshare_mem;

static void __iomem *vpu_base;
static int vpu_ipi_irq;
static u32 phy_vpu_base_addr;

static struct device *vpu_dev;

/* IRAM setting */
static struct iram_setting iram;

/* implement the blocking ioctl */
static int irq_status;
static int codec_done;
static wait_queue_head_t vpu_queue;

static int vpu_jpu_irq;

#ifdef CONFIG_PM
static unsigned int regBk[64];
static unsigned int pc_before_suspend;
#endif
static struct regulator *vpu_regulator;

#define READ_REG(x)             readl_relaxed(vpu_base + (x))
#define WRITE_REG(val, x)       writel_relaxed(val, vpu_base + (x))

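/*
 * Note: the relaxed MMIO accessors above imply no memory barriers; register
 * accesses in this driver are serialized by vpu_data.lock or happen in the
 * interrupt handlers, so no explicit ordering is added here.
 */
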
static int vpu_clk_enable(struct vpu_priv *vpu_data)
{
        int ret = 0;

        if (vpu_data->clk_enabled++ == 0)
                ret = clk_prepare_enable(vpu_clk);

        if (WARN_ON(vpu_data->clk_enabled <= 0))
                return -EINVAL;

        return ret;
}

static int vpu_clk_disable(struct vpu_priv *vpu_data)
{
        if (WARN_ON(vpu_data->clk_enabled == 0))
                return -EINVAL;

        if (--vpu_data->clk_enabled == 0)
                clk_disable_unprepare(vpu_clk);
        return 0;
}

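/*
 * vpu_clk_enable()/vpu_clk_disable() keep a reference count on top of the
 * common clock framework so that the different users (the
 * VPU_IOC_CLKGATE_SETTING ioctl, the release path and suspend/resume) can
 * be balanced independently; the clock is only gated when the count drops
 * back to zero.  Callers are expected to hold vpu_data.lock.
 */
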
static inline int vpu_reset(void)
{
        return device_reset(vpu_dev);
}

static void vpu_power_up(void)
{
        int ret;

        if (IS_ERR(vpu_regulator))
                return;

        ret = regulator_enable(vpu_regulator);
        if (ret)
                dev_err(vpu_dev, "failed to power up vpu: %d\n", ret);
}

static void vpu_power_down(void)
{
        int ret;

        if (IS_ERR(vpu_regulator))
                return;

        ret = regulator_disable(vpu_regulator);
        if (ret)
                dev_err(vpu_dev, "failed to power down vpu: %d\n", ret);
}

/*!
 * Private function to allocate a DMA buffer
 * @return  0 on success or negative error code on error
 */
static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
{
        mem->cpu_addr = dma_alloc_coherent(vpu_dev, PAGE_ALIGN(mem->size),
                                        &mem->phy_addr,
                                        GFP_DMA | GFP_KERNEL);
        dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = %p\n", mem->cpu_addr);
        if (mem->cpu_addr == NULL) {
                dev_err(vpu_dev, "Physical memory allocation error!\n");
                return -ENOMEM;
        }
        return 0;
}

/*!
 * Private function to free a DMA buffer
 */
static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
{
        if (mem->cpu_addr != NULL)
                dma_free_coherent(vpu_dev, PAGE_ALIGN(mem->size),
                                mem->cpu_addr, mem->phy_addr);
}

/*!
 * Private function to free all remaining buffers on the allocation list
 * @return  0 on success
 */
static int vpu_free_buffers(void)
{
        struct memalloc_record *rec, *n;
        struct vpu_mem_desc mem;

        list_for_each_entry_safe(rec, n, &mem_list, list) {
                mem = rec->mem;
                if (mem.cpu_addr != NULL) {
                        vpu_free_dma_buffer(&mem);
                        dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
                        /* delete from list */
                        list_del(&rec->list);
                        kfree(rec);
                }
        }

        return 0;
}

static inline void vpu_worker_callback(struct work_struct *w)
{
        struct vpu_priv *dev = container_of(w, struct vpu_priv, work);

        if (dev->async_queue)
                kill_fasync(&dev->async_queue, SIGIO, POLL_IN);

        irq_status = 1;
        /*
         * Clock gating around dec/enc runs is driven from user space via
         * VPU_IOC_CLKGATE_SETTING; here we only reset the completion flag.
         */
        if (codec_done)
                codec_done = 0;

        wake_up_interruptible(&vpu_queue);
}

/*!
 * @brief vpu interrupt handler
 */
static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
{
        struct vpu_priv *dev = dev_id;
        unsigned long reg;

        reg = READ_REG(BIT_INT_REASON);
        if (reg & 0x8)
                codec_done = 1;
        WRITE_REG(0x1, BIT_INT_CLEAR);

        queue_work(dev->workqueue, &dev->work);

        return IRQ_HANDLED;
}

/*!
 * @brief vpu jpu interrupt handler
 */
static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
{
        struct vpu_priv *dev = dev_id;
        unsigned long reg;

        reg = READ_REG(MJPEG_PIC_STATUS_REG);
        if (reg & 0x3)
                codec_done = 1;

        queue_work(dev->workqueue, &dev->work);

        return IRQ_HANDLED;
}

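/*
 * Both interrupt handlers only latch the completion status in the top
 * half; signalling user space (SIGIO via kill_fasync() and the wake-up
 * of VPU_IOC_WAIT4INT waiters) is deferred to vpu_worker_callback()
 * through the driver workqueue.
 */
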
/*!
 * @brief open function for vpu file operation
 *
 * @return  0 on success or negative error code on error
 */
static int vpu_open(struct inode *inode, struct file *filp)
{
        struct vpu_user_data *user_data = devm_kzalloc(vpu_dev,
                                                sizeof(*user_data),
                                                GFP_KERNEL);
        if (user_data == NULL)
                return -ENOMEM;

        user_data->vpu_data = &vpu_data;

        mutex_lock(&vpu_data.lock);

        /* the user list must only be modified under the lock */
        INIT_LIST_HEAD(&user_data->list);
        list_add(&user_data->list, &vpu_data.users);

        if (open_count++ == 0) {
                pm_runtime_get_sync(vpu_dev);
                vpu_power_up();
        }

        filp->private_data = user_data;
        mutex_unlock(&vpu_data.lock);
        return 0;
}

/*!
 * @brief IO ctrl function for vpu file operation
 * @param cmd IO ctrl command
 * @return  0 on success or negative error code on error
 */
static long vpu_ioctl(struct file *filp, u_int cmd,
                     u_long arg)
{
        int ret = -EINVAL;
        struct vpu_user_data *user_data = filp->private_data;
        struct vpu_priv *vpu_data = user_data->vpu_data;

        switch (cmd) {
        case VPU_IOC_PHYMEM_ALLOC:
        {
                struct memalloc_record *rec;

                rec = kzalloc(sizeof(*rec), GFP_KERNEL);
                if (!rec)
                        return -ENOMEM;

                if (copy_from_user(&rec->mem,
                                        (struct vpu_mem_desc __user *)arg,
                                        sizeof(struct vpu_mem_desc))) {
                        kfree(rec);
                        return -EFAULT;
                }

                dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
                        rec->mem.size);

                ret = vpu_alloc_dma_buffer(&rec->mem);
                if (ret) {
                        kfree(rec);
                        return ret;
                }
                if (copy_to_user((void __user *)arg, &rec->mem,
                                        sizeof(struct vpu_mem_desc))) {
                        /* don't leak the DMA buffer on a failed copy-out */
                        vpu_free_dma_buffer(&rec->mem);
                        kfree(rec);
                        return -EFAULT;
                }

                mutex_lock(&vpu_data->lock);
                list_add(&rec->list, &mem_list);
                mutex_unlock(&vpu_data->lock);

                break;
        }
        case VPU_IOC_PHYMEM_FREE:
        {
                struct memalloc_record *rec, *n;
                struct vpu_mem_desc vpu_mem;

                if (copy_from_user(&vpu_mem,
                                        (struct vpu_mem_desc __user *)arg,
                                        sizeof(struct vpu_mem_desc)))
                        return -EFAULT;

                dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = %p\n",
                        vpu_mem.cpu_addr);
                if (vpu_mem.cpu_addr != NULL)
                        vpu_free_dma_buffer(&vpu_mem);

                mutex_lock(&vpu_data->lock);
                list_for_each_entry_safe(rec, n, &mem_list, list) {
                        if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
                                /* only free a record that was actually found */
                                list_del(&rec->list);
                                kfree(rec);
                                break;
                        }
                }
                mutex_unlock(&vpu_data->lock);

                ret = 0;
                break;
        }
        case VPU_IOC_WAIT4INT:
        {
                u_long timeout = arg;

                ret = wait_event_interruptible_timeout(vpu_queue,
                                                irq_status != 0,
                                                msecs_to_jiffies(timeout));
                if (ret == 0) {
                        dev_warn(vpu_dev, "VPU blocking: timeout.\n");
                        ret = -ETIMEDOUT;
                } else if (signal_pending(current)) {
                        dev_warn(vpu_dev, "VPU wait interrupted by signal.\n");
                        ret = -ERESTARTSYS;
                } else {
                        irq_status = 0;
                        ret = 0;
                }
                break;
        }
        case VPU_IOC_IRAM_SETTING:
                ret = copy_to_user((void __user *)arg, &iram,
                                sizeof(struct iram_setting));
                if (ret)
                        ret = -EFAULT;

                break;
        case VPU_IOC_CLKGATE_SETTING:
        {
                u32 clkgate_en;

                if (get_user(clkgate_en, (u32 __user *)arg))
                        return -EFAULT;

                mutex_lock(&vpu_data->lock);
                if (clkgate_en) {
                        ret = vpu_clk_enable(vpu_data);
                        if (ret == 0)
                                user_data->clk_enable_cnt++;
                } else {
                        if (user_data->clk_enable_cnt == 0) {
                                ret = -EINVAL;
                        } else {
                                if (--user_data->clk_enable_cnt == 0)
                                        vpu_clk_disable(vpu_data);
                                ret = 0;
                        }
                }
                mutex_unlock(&vpu_data->lock);
                break;
        }
        case VPU_IOC_GET_SHARE_MEM:
                mutex_lock(&vpu_data->lock);
                if (share_mem.cpu_addr == NULL) {
                        if (copy_from_user(&share_mem,
                                                (struct vpu_mem_desc __user *)arg,
                                                sizeof(struct vpu_mem_desc))) {
                                mutex_unlock(&vpu_data->lock);
                                return -EFAULT;
                        }
                        ret = vpu_alloc_dma_buffer(&share_mem);
                        if (ret) {
                                mutex_unlock(&vpu_data->lock);
                                return ret;
                        }
                }
                if (copy_to_user((void __user *)arg,
                                        &share_mem,
                                        sizeof(struct vpu_mem_desc)))
                        ret = -EFAULT;
                else
                        ret = 0;
                mutex_unlock(&vpu_data->lock);
                break;
        case VPU_IOC_REQ_VSHARE_MEM:
                mutex_lock(&vpu_data->lock);
                if (vshare_mem.cpu_addr == NULL) {
                        if (copy_from_user(&vshare_mem,
                                                (struct vpu_mem_desc __user *)arg,
                                                sizeof(struct vpu_mem_desc))) {
                                mutex_unlock(&vpu_data->lock);
                                return -EFAULT;
                        }
                        vshare_mem.cpu_addr = vmalloc_user(vshare_mem.size);
                        if (vshare_mem.cpu_addr == NULL) {
                                mutex_unlock(&vpu_data->lock);
                                return -ENOMEM;
                        }
                }
                if (copy_to_user((void __user *)arg, &vshare_mem,
                                        sizeof(struct vpu_mem_desc)))
                        ret = -EFAULT;
                else
                        ret = 0;
                mutex_unlock(&vpu_data->lock);
                break;
        case VPU_IOC_GET_WORK_ADDR:
                if (bitwork_mem.cpu_addr == NULL) {
                        if (copy_from_user(&bitwork_mem,
                                                (struct vpu_mem_desc __user *)arg,
                                                sizeof(struct vpu_mem_desc)))
                                return -EFAULT;

                        ret = vpu_alloc_dma_buffer(&bitwork_mem);
                        if (ret)
                                return ret;
                }
                if (copy_to_user((void __user *)arg,
                                        &bitwork_mem,
                                        sizeof(struct vpu_mem_desc)))
                        ret = -EFAULT;
                else
                        ret = 0;
                break;
        /*
         * The following two ioctls are used when the user allocates the
         * working buffer and registers it with the VPU driver.
         */
        case VPU_IOC_QUERY_BITWORK_MEM:
                if (copy_to_user((void __user *)arg,
                                        &bitwork_mem,
                                        sizeof(struct vpu_mem_desc)))
                        ret = -EFAULT;
                else
                        ret = 0;
                break;
        case VPU_IOC_SET_BITWORK_MEM:
                if (copy_from_user(&bitwork_mem,
                                        (struct vpu_mem_desc __user *)arg,
                                        sizeof(struct vpu_mem_desc)))
                        ret = -EFAULT;
                else
                        ret = 0;
                break;
        case VPU_IOC_SYS_SW_RESET:
                ret = vpu_reset();
                break;
        case VPU_IOC_REG_DUMP:
        case VPU_IOC_PHYMEM_DUMP:
                ret = 0;
                break;
        case VPU_IOC_PHYMEM_CHECK:
        {
                struct vpu_mem_desc check_memory;

                ret = copy_from_user(&check_memory,
                                (void __user *)arg,
                                sizeof(struct vpu_mem_desc));
                if (ret != 0) {
                        dev_err(vpu_dev, "copy from user failure: %d\n", ret);
                        ret = -EFAULT;
                        break;
                }
                check_memory.size = 1;
                if (copy_to_user((void __user *)arg, &check_memory,
                                        sizeof(struct vpu_mem_desc)))
                        ret = -EFAULT;
                else
                        ret = 0;
                break;
        }
        case VPU_IOC_LOCK_DEV:
        {
                u32 lock_en;

                if (get_user(lock_en, (u32 __user *)arg))
                        return -EFAULT;

                if (lock_en)
                        mutex_lock(&vpu_data->lock);
                else
                        mutex_unlock(&vpu_data->lock);
                ret = 0;
                break;
        }
        default:
                dev_err(vpu_dev, "No such IOCTL, cmd is 0x%x\n", cmd);
        }
        return ret;
}

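/*
 * A minimal sketch of the expected user-space call sequence (illustrative
 * only; the device node name and the error handling are assumptions, and
 * real users typically go through the imx-vpu library instead):
 *
 *      int fd = open("/dev/mxc_vpu", O_RDWR);
 *      struct vpu_mem_desc mem = { .size = 0x100000 };
 *      u32 on = 1, off = 0;
 *
 *      ioctl(fd, VPU_IOC_PHYMEM_ALLOC, &mem);           // fills phy_addr
 *      void *va = mmap(NULL, mem.size, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, fd, mem.phy_addr);   // map DMA buffer
 *      ioctl(fd, VPU_IOC_CLKGATE_SETTING, &on);         // ungate the clock
 *      // ... program the codec through the register mapping ...
 *      ioctl(fd, VPU_IOC_WAIT4INT, 1000);               // wait up to 1000 ms
 *      ioctl(fd, VPU_IOC_CLKGATE_SETTING, &off);        // gate the clock
 *      ioctl(fd, VPU_IOC_PHYMEM_FREE, &mem);
 *      close(fd);
 */
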
/*!
 * @brief Release function for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_release(struct inode *inode, struct file *filp)
{
        unsigned long timeout;
        struct vpu_user_data *user_data = filp->private_data;
        struct vpu_priv *vpu_data = user_data->vpu_data;

        mutex_lock(&vpu_data->lock);

        if (open_count > 0 && !--open_count) {
                /* Wait for the VPU to go idle */
                vpu_clk_enable(vpu_data);
                if (READ_REG(BIT_CUR_PC)) {
                        timeout = jiffies + HZ;
                        while (READ_REG(BIT_BUSY_FLAG)) {
                                msleep(1);
                                if (time_after(jiffies, timeout)) {
                                        dev_warn(vpu_dev, "VPU timeout during release\n");
                                        break;
                                }
                        }

                        /* Clean up interrupt */
                        cancel_work_sync(&vpu_data->work);
                        flush_workqueue(vpu_data->workqueue);
                        irq_status = 0;

                        if (READ_REG(BIT_BUSY_FLAG)) {
                                if (vpu_data->soc_data->is_mx51 ||
                                        vpu_data->soc_data->is_mx53) {
                                        dev_err(vpu_dev,
                                                "fatal error: can't gate/power off when VPU is busy\n");
                                        vpu_clk_disable(vpu_data);
                                        mutex_unlock(&vpu_data->lock);
                                        return -EBUSY;
                                }
                                if (vpu_data->soc_data->is_mx6dl ||
                                        vpu_data->soc_data->is_mx6q) {
                                        WRITE_REG(0x11, 0x10F0);
                                        timeout = jiffies + HZ;
                                        while (READ_REG(0x10F4) != 0x77) {
                                                msleep(1);
                                                if (time_after(jiffies, timeout))
                                                        break;
                                        }

                                        if (READ_REG(0x10F4) != 0x77) {
                                                dev_err(vpu_dev,
                                                        "fatal error: can't gate/power off when VPU is busy\n");
                                                WRITE_REG(0x0, 0x10F0);
                                                vpu_clk_disable(vpu_data);
                                                mutex_unlock(&vpu_data->lock);
                                                return -EBUSY;
                                        }
                                        vpu_reset();
                                }
                        }
                }

                vpu_free_buffers();

                /* Free the shared memory now that the vpu device is idle */
                vpu_free_dma_buffer(&share_mem);
                share_mem.cpu_addr = NULL;
                vfree(vshare_mem.cpu_addr);
                vshare_mem.cpu_addr = NULL;

                vpu_clk_disable(vpu_data);
                vpu_power_down();
                pm_runtime_put_sync_suspend(vpu_dev);
        }

        /* Drop any clock references this user still holds */
        while (user_data->clk_enable_cnt) {
                user_data->clk_enable_cnt--;
                vpu_clk_disable(vpu_data);
        }

        list_del(&user_data->list);
        devm_kfree(vpu_dev, user_data);
        mutex_unlock(&vpu_data->lock);

        return 0;
}

/*!
 * @brief fasync function for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_fasync(int fd, struct file *filp, int mode)
{
        struct vpu_user_data *user_data = filp->private_data;
        struct vpu_priv *vpu_data = user_data->vpu_data;

        return fasync_helper(fd, filp, mode, &vpu_data->async_queue);
}

/*!
 * @brief memory map function of hardware registers for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
{
        unsigned long pfn;

        vm->vm_flags |= VM_IO;
        /*
         * The vpu registers were mapped with ioremap() at probe time, which
         * sets L_PTE_XN, and the same physical address must always be mapped
         * with the same memory type, so set L_PTE_XN here as well.
         * Otherwise the video codec may misbehave.
         */
        vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
        pfn = phy_vpu_base_addr >> PAGE_SHIFT;
        dev_dbg(vpu_dev, "size=0x%lx, page no.=0x%lx\n",
                 vm->vm_end - vm->vm_start, pfn);
        return remap_pfn_range(vm, vm->vm_start, pfn,
                        vm->vm_end - vm->vm_start,
                        vm->vm_page_prot) ? -EAGAIN : 0;
}

/*!
 * @brief memory map function of memory for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
{
        size_t request_size = vm->vm_end - vm->vm_start;

        dev_dbg(vpu_dev, "start=0x%08lx, pgoff=0x%08lx, size=%zx\n",
                vm->vm_start, vm->vm_pgoff, request_size);

        vm->vm_flags |= VM_IO;
        vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);

        return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
                               request_size, vm->vm_page_prot) ? -EAGAIN : 0;
}

/*!
 * @brief memory map function of vmalloc()ed share memory
 * @return  0 on success or negative error code on error
 */
static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
{
        int ret;

        ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
        vm->vm_flags |= VM_IO;
        return ret;
}

/*!
 * @brief memory map interface for vpu file operation
 * @return  0 on success or negative error code on error
 */
static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
{
        unsigned long offset;

        offset = (unsigned long)vshare_mem.cpu_addr >> PAGE_SHIFT;

        if (vm->vm_pgoff && (vm->vm_pgoff == offset))
                return vpu_map_vshare_mem(fp, vm);
        else if (vm->vm_pgoff)
                return vpu_map_dma_mem(fp, vm);
        else
                return vpu_map_hwregs(fp, vm);
}

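/*
 * The mmap() offset selects what is mapped: offset 0 maps the VPU register
 * block, an offset equal to the vmalloc()ed share memory address maps that
 * buffer, and any other page offset is treated as the physical address of a
 * DMA buffer previously returned by VPU_IOC_PHYMEM_ALLOC.
 */
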
static const struct file_operations vpu_fops = {
        .owner = THIS_MODULE,
        .open = vpu_open,
        .unlocked_ioctl = vpu_ioctl,
        .release = vpu_release,
        .fasync = vpu_fasync,
        .mmap = vpu_mmap,
};

static const struct mxc_vpu_soc_data imx6dl_vpu_data = {
        .regulator_required = 1,
        .vpu_pwr_mgmnt = 1,
        .has_jpu = 1,
};

static const struct mxc_vpu_soc_data imx6q_vpu_data = {
        .quirk_subblk_en = 1,
        .regulator_required = 1,
        .vpu_pwr_mgmnt = 1,
        .has_jpu = 1,
};

static const struct mxc_vpu_soc_data imx53_vpu_data = {
};

static const struct mxc_vpu_soc_data imx51_vpu_data = {
        .vpu_pwr_mgmnt = 1,
};

static const struct of_device_id vpu_of_match[] = {
        { .compatible = "fsl,imx6dl-vpu", .data = &imx6dl_vpu_data, },
        { .compatible = "fsl,imx6q-vpu", .data = &imx6q_vpu_data, },
        { .compatible = "fsl,imx53-vpu", .data = &imx53_vpu_data, },
        { .compatible = "fsl,imx51-vpu", .data = &imx51_vpu_data, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, vpu_of_match);

/*!
 * This function is called by the driver framework to initialize the vpu device.
 * @param   pdev The platform device structure for the vpu passed in by the framework.
 * @return   0 on success or negative error code on error
 */
static int vpu_dev_probe(struct platform_device *pdev)
{
        int err;
        struct device *temp_class;
        struct resource *res;
        unsigned long addr = 0;
        struct device_node *np = pdev->dev.of_node;
        u32 iramsize;
        const struct of_device_id *of_id = of_match_device(vpu_of_match,
                                                        &pdev->dev);
        const struct mxc_vpu_soc_data *soc_data = of_id->data;

        /*
         * Use the single static vpu_priv instance throughout: the IRQ
         * handlers and the file operations all reference &vpu_data, so the
         * SoC data and the user list must live there as well.
         */
        vpu_data.soc_data = soc_data;
        INIT_LIST_HEAD(&vpu_data.users);
        INIT_WORK(&vpu_data.work, vpu_worker_callback);
        mutex_init(&vpu_data.lock);

        init_waitqueue_head(&vpu_queue);

        err = of_property_read_u32(np, "iramsize", &iramsize);
        if (!err && iramsize) {
                iram_pool = of_get_named_gen_pool(np, "iram", 0);
                if (!iram_pool) {
                        dev_err(&pdev->dev, "iram pool not available\n");
                        return -ENOMEM;
                }

                iram_base = gen_pool_alloc(iram_pool, iramsize);
                if (!iram_base) {
                        dev_err(&pdev->dev, "unable to alloc iram\n");
                        return -ENOMEM;
                }

                addr = gen_pool_virt_to_phys(iram_pool, iram_base);
        }

        if (addr == 0) {
                iram.start = iram.end = 0;
        } else {
                iram.start = addr;
                iram.end = addr + iramsize - 1;
        }

        vpu_dev = &pdev->dev;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
        if (!res) {
                dev_err(vpu_dev, "vpu: unable to get vpu base addr\n");
                return -ENODEV;
        }
        phy_vpu_base_addr = res->start;
        vpu_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(vpu_base))
                return PTR_ERR(vpu_base);

        vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
        if (vpu_major < 0) {
                dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
                return vpu_major;
        }

        vpu_class = class_create(THIS_MODULE, "mxc_vpu");
        if (IS_ERR(vpu_class)) {
                err = PTR_ERR(vpu_class);
                goto err_out_chrdev;
        }

        temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
                                   NULL, "mxc_vpu");
        if (IS_ERR(temp_class)) {
                err = PTR_ERR(temp_class);
                goto err_out_class;
        }

        vpu_clk = clk_get(&pdev->dev, "vpu_clk");
        if (IS_ERR(vpu_clk)) {
                err = PTR_ERR(vpu_clk);
                goto err_out_class;
        }

        vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
        if (vpu_ipi_irq < 0) {
                dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
                err = vpu_ipi_irq;
                goto err_out_clk;
        }

        /* The workqueue must exist before the first interrupt can fire */
        vpu_data.workqueue = create_workqueue("vpu_wq");
        if (!vpu_data.workqueue) {
                err = -ENOMEM;
                goto err_out_clk;
        }

        err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
                          &vpu_data);
        if (err)
                goto err_out_wq;

        vpu_regulator = devm_regulator_get(vpu_dev, "pu");
        if (IS_ERR(vpu_regulator)) {
                if (vpu_data.soc_data->regulator_required) {
                        dev_err(vpu_dev, "failed to get vpu power\n");
                        err = PTR_ERR(vpu_regulator);
                        goto err_out_irq;
                } else {
                        /* regulator_get will return an error on MX5x;
                         * just ignore it there
                         */
                        dev_warn(vpu_dev, "failed to get vpu power\n");
                }
        }

        platform_set_drvdata(pdev, &vpu_data);

        if (vpu_data.soc_data->has_jpu) {
                vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
                if (vpu_jpu_irq < 0) {
                        dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
                        err = vpu_jpu_irq;
                        goto err_out_irq;
                }
                err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
                                "VPU_JPG_IRQ", &vpu_data);
                if (err)
                        goto err_out_irq;
        }

        pm_runtime_enable(&pdev->dev);

        dev_info(vpu_dev, "VPU initialized\n");
        return 0;

err_out_irq:
        free_irq(vpu_ipi_irq, &vpu_data);
err_out_wq:
        destroy_workqueue(vpu_data.workqueue);
err_out_clk:
        clk_put(vpu_clk);
err_out_class:
        device_destroy(vpu_class, MKDEV(vpu_major, 0));
        class_destroy(vpu_class);
err_out_chrdev:
        unregister_chrdev(vpu_major, "mxc_vpu");
        return err;
}

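/*
 * Device tree sketch matching what this probe routine parses (addresses,
 * interrupt numbers and sizes are illustrative assumptions, not a binding
 * reference):
 *
 *      vpu@2040000 {
 *              compatible = "fsl,imx6q-vpu";
 *              reg = <0x02040000 0x3c000>;
 *              reg-names = "vpu_regs";
 *              interrupts = <0 12 4>, <0 3 4>;
 *              interrupt-names = "vpu_ipi_irq", "vpu_jpu_irq";
 *              clocks = <&clks IMX6QDL_CLK_VPU_AXI>;
 *              clock-names = "vpu_clk";
 *              iramsize = <0x21000>;
 *              iram = <&ocram>;
 *              pu-supply = <&reg_pu>;
 *      };
 */
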
static int vpu_dev_remove(struct platform_device *pdev)
{
        struct vpu_priv *vpu_data = platform_get_drvdata(pdev);

        pm_runtime_disable(&pdev->dev);

        /* the dev_id must match what was passed to request_irq() in probe */
        free_irq(vpu_ipi_irq, vpu_data);
        if (vpu_data->soc_data->has_jpu)
                free_irq(vpu_jpu_irq, vpu_data);
        cancel_work_sync(&vpu_data->work);
        flush_workqueue(vpu_data->workqueue);
        destroy_workqueue(vpu_data->workqueue);

        /* vpu_base was mapped with devm_ioremap_resource(); no iounmap() */
        if (iram.start)
                gen_pool_free(iram_pool, iram_base, iram.end - iram.start + 1);

        if (vpu_major > 0) {
                device_destroy(vpu_class, MKDEV(vpu_major, 0));
                class_destroy(vpu_class);
                unregister_chrdev(vpu_major, "mxc_vpu");
                vpu_major = 0;
        }

        vpu_free_dma_buffer(&bitwork_mem);
        vpu_free_dma_buffer(&pic_para_mem);
        vpu_free_dma_buffer(&user_data_mem);

        /* reset VPU state */
        vpu_power_up();
        vpu_clk_enable(vpu_data);
        vpu_reset();
        vpu_clk_disable(vpu_data);
        vpu_power_down();

        clk_put(vpu_clk);
        return 0;
}

#ifdef CONFIG_PM
static int vpu_suspend(struct device *dev)
{
        struct vpu_priv *vpu_data = dev_get_drvdata(dev);
        unsigned long timeout;

        mutex_lock(&vpu_data->lock);

        if (open_count) {
                /* Wait for the VPU to go idle; give up if it is still
                   busy after about one second */
                timeout = jiffies + HZ;
                while (READ_REG(BIT_BUSY_FLAG)) {
                        msleep(1);
                        if (time_after(jiffies, timeout)) {
                                mutex_unlock(&vpu_data->lock);
                                return -EAGAIN;
                        }
                }

                if (vpu_data->soc_data->is_mx53) {
                        mutex_unlock(&vpu_data->lock);
                        return 0;
                }

                if (bitwork_mem.cpu_addr != NULL) {
                        int i;

                        /* Save 64 registers from BIT_CODE_BUF_ADDR */
                        for (i = 0; i < 64; i++)
                                regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
                        pc_before_suspend = READ_REG(BIT_CUR_PC);
                }

                vpu_clk_disable(vpu_data);
                /* If the VPU was running before suspend, disable the
                 * regulator to keep its use count balanced.
                 */
                vpu_power_down();
        }

        mutex_unlock(&vpu_data->lock);
        return 0;
}

static int vpu_resume(struct device *dev)
{
        int i;
        struct vpu_priv *vpu_data = dev_get_drvdata(dev);

        mutex_lock(&vpu_data->lock);

        if (open_count) {
                if (vpu_data->soc_data->is_mx53) {
                        vpu_clk_enable(vpu_data);
                        goto out;
                }

                /* If the VPU was running before suspend, enable the
                 * regulator again to keep its use count balanced.
                 */
                vpu_power_up();

                if (bitwork_mem.cpu_addr != NULL) {
                        u32 *p = bitwork_mem.cpu_addr;
                        u32 data, pc;
                        u16 data_hi;
                        u16 data_lo;

                        vpu_clk_enable(vpu_data);

                        pc = READ_REG(BIT_CUR_PC);
                        if (pc) {
                                dev_warn(vpu_dev, "Not powered off after suspend (PC=0x%x)\n", pc);
                                goto out;
                        }

                        /* Restore registers */
                        for (i = 0; i < 64; i++)
                                WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));

                        WRITE_REG(0x0, BIT_RESET_CTRL);
                        WRITE_REG(0x0, BIT_CODE_RUN);
                        /* MX6 RTL has a bug and does not initialize MBC_SET_SUBBLK_EN on reset */
                        if (vpu_data->soc_data->quirk_subblk_en)
                                WRITE_REG(0x0, MBC_SET_SUBBLK_EN);

                        /*
                         * Re-load the boot code from the code buffer in external RAM.
                         * Thankfully, we only need 4096 bytes, the same for all platforms.
                         */
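                        /*
                         * Each BIT_CODE_DOWN write carries a halfword
                         * index in the upper 16 bits and one 16-bit code
                         * halfword in the lower 16 bits.  The loop below
                         * feeds back 2048 halfwords (4096 bytes); note
                         * that each pair of 32-bit words is written in
                         * swapped order, high halfword first.
                         */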
                        for (i = 0; i < 2048; i += 4) {
                                data = p[(i / 2) + 1];
                                data_hi = (data >> 16) & 0xFFFF;
                                data_lo = data & 0xFFFF;
                                WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
                                WRITE_REG(((i + 1) << 16) | data_lo,
                                                BIT_CODE_DOWN);

                                data = p[i / 2];
                                data_hi = (data >> 16) & 0xFFFF;
                                data_lo = data & 0xFFFF;
                                WRITE_REG(((i + 2) << 16) | data_hi,
                                                BIT_CODE_DOWN);
                                WRITE_REG(((i + 3) << 16) | data_lo,
                                                BIT_CODE_DOWN);
                        }

                        if (pc_before_suspend) {
                                WRITE_REG(0x1, BIT_BUSY_FLAG);
                                WRITE_REG(0x1, BIT_CODE_RUN);
                                while (READ_REG(BIT_BUSY_FLAG))
                                        ;
                        } else {
                                dev_warn(vpu_dev, "PC=0 before suspend\n");
                        }
                }
        }
out:
        mutex_unlock(&vpu_data->lock);
        return 0;
}

static SIMPLE_DEV_PM_OPS(vpu_pm_ops, vpu_suspend, vpu_resume);
#define VPU_PM_OPS &vpu_pm_ops
#else
#define VPU_PM_OPS NULL
#endif /* CONFIG_PM */

/*! Driver definition */
static struct platform_driver mxcvpu_driver = {
        .driver = {
                .name = "mxc_vpu",
                .of_match_table = vpu_of_match,
                .pm = VPU_PM_OPS,
        },
        .probe = vpu_dev_probe,
        .remove = vpu_dev_remove,
};

module_platform_driver(mxcvpu_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
MODULE_LICENSE("GPL");