2 * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
17 * @brief VPU system initialization and file operation implementation
22 #include <linux/kernel.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/stat.h>
27 #include <linux/platform_device.h>
28 #include <linux/kdev_t.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/wait.h>
31 #include <linux/list.h>
32 #include <linux/clk.h>
33 #include <linux/delay.h>
34 #include <linux/fsl_devices.h>
35 #include <linux/uaccess.h>
37 #include <linux/slab.h>
38 #include <linux/workqueue.h>
39 #include <linux/sched.h>
40 #include <linux/vmalloc.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/page-flags.h>
43 #include <linux/mm_types.h>
44 #include <linux/types.h>
45 #include <linux/memblock.h>
46 #include <linux/memory.h>
47 #include <linux/version.h>
48 #include <linux/module.h>
49 #include <linux/pm_runtime.h>
50 #include <linux/sizes.h>
51 #include <linux/genalloc.h>
53 #include <linux/of_device.h>
54 #include <linux/reset.h>
55 #include <linux/clk.h>
56 #include <linux/mxc_vpu.h>
58 /* Define one new pgprot which combined uncached and XN(never executable) */
59 #define pgprot_noncachedxn(prot) \
60 __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
63 struct fasync_struct *async_queue;
64 struct work_struct work;
65 struct workqueue_struct *workqueue;
67 const struct mxc_vpu_soc_data *soc_data;
71 struct vpu_user_data {
72 struct vpu_priv *vpu_data;
76 /* To track the allocated memory buffer */
77 struct memalloc_record {
78 struct list_head list;
79 struct vpu_mem_desc mem;
87 struct mxc_vpu_soc_data {
88 unsigned vpu_pwr_mgmnt:1,
98 static struct gen_pool *iram_pool;
101 static LIST_HEAD(mem_list);
103 static int vpu_major;
104 static struct class *vpu_class;
105 static struct vpu_priv *vpu_data;
106 static u8 open_count;
107 static struct clk *vpu_clk;
108 static struct vpu_mem_desc bitwork_mem;
109 static struct vpu_mem_desc pic_para_mem;
110 static struct vpu_mem_desc user_data_mem;
111 static struct vpu_mem_desc share_mem;
112 static struct vpu_mem_desc vshare_mem;
114 static void __iomem *vpu_base;
115 static int vpu_ipi_irq;
116 static u32 phy_vpu_base_addr;
118 static struct device *vpu_dev;
121 static struct iram_setting iram;
123 /* implement the blocking ioctl */
124 static int irq_status;
125 static int codec_done;
126 static wait_queue_head_t vpu_queue;
128 static int vpu_jpu_irq;
130 #ifdef CONFIG_PM_SLEEP
131 static unsigned int regBk[64];
132 static unsigned int pc_before_suspend;
134 static struct regulator *vpu_regulator;
136 #define READ_REG(x) readl_relaxed(vpu_base + (x))
137 #define WRITE_REG(val, x) writel_relaxed(val, vpu_base + (x))
139 static int vpu_clk_enable(struct vpu_priv *vpu_data)
141 if (WARN_ON(vpu_data->clk_enabled < 0))
144 if (vpu_data->clk_enabled++ == 0)
145 return clk_prepare_enable(vpu_clk);
150 static int vpu_clk_disable(struct vpu_priv *vpu_data)
152 if (WARN_ON(vpu_data->clk_enabled <= 0))
155 if (--vpu_data->clk_enabled == 0)
156 clk_disable_unprepare(vpu_clk);
160 static inline int vpu_reset(void)
162 return device_reset(vpu_dev);
165 static void vpu_power_up(void)
169 if (IS_ERR(vpu_regulator))
172 ret = regulator_enable(vpu_regulator);
174 dev_err(vpu_dev, "failed to power up vpu: %d\n", ret);
177 static void vpu_power_down(void)
181 if (IS_ERR(vpu_regulator))
184 ret = regulator_disable(vpu_regulator);
186 dev_err(vpu_dev, "failed to power down vpu: %d\n", ret);
190 * Private function to alloc dma buffer
191 * @return status 0 success.
193 static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
195 mem->cpu_addr = dma_alloc_coherent(vpu_dev, PAGE_ALIGN(mem->size),
197 GFP_DMA | GFP_KERNEL);
198 dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = %p\n", mem->cpu_addr);
199 if (mem->cpu_addr == NULL) {
200 dev_err(vpu_dev, "Physical memory allocation error!\n");
207 * Private function to free dma buffer
209 static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
211 if (mem->cpu_addr != NULL)
212 dma_free_coherent(vpu_dev, PAGE_ALIGN(mem->size),
213 mem->cpu_addr, mem->phy_addr);
217 * Private function to free buffers
218 * @return status 0 success.
220 static int vpu_free_buffers(void)
222 struct memalloc_record *rec, *n;
223 struct vpu_mem_desc mem;
225 list_for_each_entry_safe(rec, n, &mem_list, list) {
227 if (mem.cpu_addr != 0) {
228 vpu_free_dma_buffer(&mem);
229 dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
230 /* delete from list */
231 list_del(&rec->list);
239 static inline void vpu_worker_callback(struct work_struct *w)
241 struct vpu_priv *dev = container_of(w, struct vpu_priv, work);
243 if (dev->async_queue)
244 kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
248 * Clock is gated on when dec/enc started, gate it off when
254 wake_up_interruptible(&vpu_queue);
258 * @brief vpu interrupt handler
260 static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
262 struct vpu_priv *dev = dev_id;
265 reg = READ_REG(BIT_INT_REASON);
268 WRITE_REG(0x1, BIT_INT_CLEAR);
270 queue_work(dev->workqueue, &dev->work);
276 * @brief vpu jpu interrupt handler
278 static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
280 struct vpu_priv *dev = dev_id;
283 reg = READ_REG(MJPEG_PIC_STATUS_REG);
287 queue_work(dev->workqueue, &dev->work);
293 * @brief open function for vpu file operation
295 * @return 0 on success or negative error code on error
297 static int vpu_open(struct inode *inode, struct file *filp)
299 struct vpu_user_data *user_data = devm_kzalloc(vpu_dev,
302 if (user_data == NULL)
305 user_data->vpu_data = vpu_data;
307 mutex_lock(&vpu_data->lock);
309 if (open_count++ == 0) {
310 pm_runtime_get_sync(vpu_dev);
314 filp->private_data = user_data;
315 mutex_unlock(&vpu_data->lock);
320 * @brief IO ctrl function for vpu file operation
321 * @param cmd IO ctrl command
322 * @return 0 on success or negative error code on error
/*
 * Main ioctl dispatcher for /dev/mxc_vpu.
 * NOTE(review): this chunk is missing interior lines (braces, returns,
 * error paths) from the original file; comments below describe only what
 * the visible code shows.
 */
324 static long vpu_ioctl(struct file *filp, u_int cmd,
328 struct vpu_user_data *user_data = filp->private_data;
329 struct vpu_priv *vpu_data = user_data->vpu_data;
/* Allocate a DMA buffer described by user space and track it on mem_list. */
332 case VPU_IOC_PHYMEM_ALLOC:
334 struct memalloc_record *rec;
336 rec = kzalloc(sizeof(*rec), GFP_KERNEL);
340 if (copy_from_user(&rec->mem,
341 (struct vpu_mem_desc *)arg,
342 sizeof(struct vpu_mem_desc))) {
347 dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
350 ret = vpu_alloc_dma_buffer(&rec->mem);
/* Return the filled-in descriptor (incl. phy_addr) to user space. */
355 if (copy_to_user((void __user *)arg, &rec->mem,
356 sizeof(struct vpu_mem_desc))) {
/* mem_list is shared; guarded by the driver mutex. */
361 mutex_lock(&vpu_data->lock);
362 list_add(&rec->list, &mem_list);
363 mutex_unlock(&vpu_data->lock);
/* Free a previously allocated DMA buffer and drop its tracking record. */
367 case VPU_IOC_PHYMEM_FREE:
369 struct memalloc_record *rec, *n;
370 struct vpu_mem_desc vpu_mem;
372 if (copy_from_user(&vpu_mem,
373 (struct vpu_mem_desc *)arg,
374 sizeof(struct vpu_mem_desc)))
377 dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = %p\n",
379 if (vpu_mem.cpu_addr != NULL)
380 vpu_free_dma_buffer(&vpu_mem);
382 mutex_lock(&vpu_data->lock);
383 list_for_each_entry_safe(rec, n, &mem_list, list) {
/* match the record by the kernel virtual address supplied by user */
384 if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
385 list_del(&rec->list);
390 mutex_unlock(&vpu_data->lock);
/* Block until a codec interrupt arrives or the timeout (ms) expires. */
395 case VPU_IOC_WAIT4INT:
397 u_long timeout = arg;
399 ret = wait_event_interruptible_timeout(vpu_queue,
401 msecs_to_jiffies(timeout));
403 dev_warn(vpu_dev, "VPU blocking: timeout.\n");
405 } else if (signal_pending(current)) {
406 dev_warn(vpu_dev, "VPU interrupt received.\n");
/* consume the pending-interrupt flag on success */
409 ret = irq_status = 0;
/* Report the IRAM region reserved at probe time. */
413 case VPU_IOC_IRAM_SETTING:
414 ret = copy_to_user((void __user *)arg, &iram,
415 sizeof(struct iram_setting));
/* Per-open nested clock gating controlled from user space. */
420 case VPU_IOC_CLKGATE_SETTING:
424 if (get_user(clkgate_en, (u32 __user *)arg))
427 mutex_lock(&vpu_data->lock);
429 ret = vpu_clk_enable(vpu_data);
431 user_data->clk_enable_cnt++;
/* presumably guards against disabling a never-enabled clock — verify */
433 if (user_data->clk_enable_cnt == 0) {
436 if (--user_data->clk_enable_cnt == 0)
437 vpu_clk_disable(vpu_data);
441 mutex_unlock(&vpu_data->lock);
/* Lazily allocate the DMA share memory on first request, then return it. */
444 case VPU_IOC_GET_SHARE_MEM:
445 mutex_lock(&vpu_data->lock);
446 if (share_mem.cpu_addr == NULL) {
447 if (copy_from_user(&share_mem,
448 (struct vpu_mem_desc *)arg,
449 sizeof(struct vpu_mem_desc))) {
450 mutex_unlock(&vpu_data->lock);
453 ret = vpu_alloc_dma_buffer(&share_mem);
455 mutex_unlock(&vpu_data->lock);
459 if (copy_to_user((void __user *)arg,
461 sizeof(struct vpu_mem_desc)))
465 mutex_unlock(&vpu_data->lock);
/* Same pattern for the vmalloc'ed share memory (mmap'ed via vpu_mmap). */
467 case VPU_IOC_REQ_VSHARE_MEM:
468 mutex_lock(&vpu_data->lock);
469 if (vshare_mem.cpu_addr == NULL) {
470 if (copy_from_user(&vshare_mem,
471 (struct vpu_mem_desc *)arg,
474 mutex_unlock(&vpu_data->lock);
/* vmalloc_user() gives a zeroed, user-mappable buffer */
477 vshare_mem.cpu_addr = vmalloc_user(vshare_mem.size);
478 if (vshare_mem.cpu_addr == NULL) {
479 mutex_unlock(&vpu_data->lock);
483 if (copy_to_user((void __user *)arg, &vshare_mem,
484 sizeof(struct vpu_mem_desc)))
488 mutex_unlock(&vpu_data->lock);
/* Lazily allocate the firmware working buffer and hand it back. */
490 case VPU_IOC_GET_WORK_ADDR:
491 if (bitwork_mem.cpu_addr == 0) {
492 if (copy_from_user(&bitwork_mem,
493 (struct vpu_mem_desc *)arg,
494 sizeof(struct vpu_mem_desc)))
497 ret = vpu_alloc_dma_buffer(&bitwork_mem);
501 if (copy_to_user((void __user *)arg,
/*
510 * The following two ioctls are used when user allocates a working buffer
511 * and registers it to vpu driver.
 */
513 case VPU_IOC_QUERY_BITWORK_MEM:
514 if (copy_to_user((void __user *)arg,
516 sizeof(struct vpu_mem_desc)))
521 case VPU_IOC_SET_BITWORK_MEM:
522 if (copy_from_user(&bitwork_mem,
523 (struct vpu_mem_desc *)arg,
524 sizeof(struct vpu_mem_desc)))
529 case VPU_IOC_SYS_SW_RESET:
/* debug-only ioctls; bodies not visible in this chunk */
532 case VPU_IOC_REG_DUMP:
533 case VPU_IOC_PHYMEM_DUMP:
/* Round-trip a descriptor so user space can probe the driver is alive. */
536 case VPU_IOC_PHYMEM_CHECK:
538 struct vpu_mem_desc check_memory;
540 if (copy_from_user(&check_memory, (void __user *)arg,
541 sizeof(struct vpu_mem_desc)))
/* size=1 marks the memory as "present" to the caller */
544 check_memory.size = 1;
545 if (copy_to_user((void __user *)arg, &check_memory,
546 sizeof(struct vpu_mem_desc)))
/* User-space lock/unlock of the device mutex for multi-process use. */
552 case VPU_IOC_LOCK_DEV:
556 if (get_user(lock_en, (u32 __user *)arg))
560 mutex_lock(&vpu_data->lock);
562 mutex_unlock(&vpu_data->lock);
567 dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
574 * @brief Release function for vpu file operation
575 * @return 0 on success or negative error code on error
/*
 * @brief Release function for vpu file operation
 * Last closer waits for the VPU to go idle, flushes pending work, frees
 * the shared memories and drops the runtime-PM reference.
 * NOTE(review): interior lines are missing from this chunk; comments
 * describe only the visible code.
 * @return 0 on success or negative error code on error
 */
577 static int vpu_release(struct inode *inode, struct file *filp)
579 unsigned long timeout;
580 struct vpu_user_data *user_data = filp->private_data;
581 struct vpu_priv *vpu_data = user_data->vpu_data;
583 mutex_lock(&vpu_data->lock);
/* Only the final close performs the shutdown sequence. */
585 if (open_count > 0 && !--open_count) {
586 /* Wait for vpu go to idle state */
587 vpu_clk_enable(vpu_data);
/* non-zero PC means firmware has been loaded/running */
588 if (READ_REG(BIT_CUR_PC)) {
/* bounded one-second busy-wait on the hardware busy flag */
590 timeout = jiffies + HZ;
591 while (READ_REG(BIT_BUSY_FLAG)) {
593 if (time_after(jiffies, timeout)) {
594 dev_warn(vpu_dev, "VPU timeout during release\n");
599 /* Clean up interrupt */
600 cancel_work_sync(&vpu_data->work)
601 flush_workqueue(vpu_data->workqueue);
/* Still busy: on MX51/MX53 we must not gate/power off the VPU. */
604 if (READ_REG(BIT_BUSY_FLAG)) {
605 if (vpu_data->soc_data->is_mx51 ||
606 vpu_data->soc_data->is_mx53) {
608 "fatal error: can't gate/power off when VPU is busy\n");
609 vpu_clk_disable(vpu_data);
610 mutex_unlock(&vpu_data->lock);
/* On MX6 attempt the documented 0x10F0/0x10F4 handshake to stop it. */
613 if (vpu_data->soc_data->is_mx6dl ||
614 vpu_data->soc_data->is_mx6q) {
615 WRITE_REG(0x11, 0x10F0);
616 timeout = jiffies + HZ;
/* 0x77 in 0x10F4 signals the stop handshake completed — assumed */
617 while (READ_REG(0x10F4) != 0x77) {
619 if (time_after(jiffies, timeout))
623 if (READ_REG(0x10F4) != 0x77) {
625 "fatal error: can't gate/power off when VPU is busy\n");
626 WRITE_REG(0x0, 0x10F0);
627 vpu_clk_disable(vpu_data);
628 mutex_unlock(&vpu_data->lock);
638 /* Free shared memory when vpu device is idle */
639 vpu_free_dma_buffer(&share_mem);
640 share_mem.cpu_addr = 0;
641 vfree(vshare_mem.cpu_addr);
642 vshare_mem.cpu_addr = 0;
/* Rebalance any clock enables this opener left outstanding. */
644 if (user_data->clk_enable_cnt)
645 vpu_clk_disable(vpu_data);
647 vpu_clk_disable(vpu_data);
649 pm_runtime_put_sync_suspend(vpu_dev);
650 devm_kfree(vpu_dev, user_data);
652 mutex_unlock(&vpu_data->lock);
658 * @brief fasync function for vpu file operation
659 * @return 0 on success or negative error code on error
661 static int vpu_fasync(int fd, struct file *filp, int mode)
663 struct vpu_user_data *user_data = filp->private_data;
664 struct vpu_priv *vpu_data = user_data->vpu_data;
665 return fasync_helper(fd, filp, mode, &vpu_data->async_queue);
669 * @brief memory map function of harware registers for vpu file operation
670 * @return 0 on success or negative error code on error
672 static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
676 vm->vm_flags |= VM_IO;
678 * Since vpu registers have been mapped with ioremap() at probe
679 * which L_PTE_XN is 1, and the same physical address must be
680 * mapped multiple times with same type, so set L_PTE_XN to 1 here.
681 * Otherwise, there may be unexpected result in video codec.
683 vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
684 pfn = phy_vpu_base_addr >> PAGE_SHIFT;
685 dev_dbg(vpu_dev, "size=0x%08lx, page no.=0x%08lx\n",
686 vm->vm_end - vm->vm_start, pfn);
687 return remap_pfn_range(vm, vm->vm_start, pfn,
688 vm->vm_end - vm->vm_start,
689 vm->vm_page_prot) ? -EAGAIN : 0;
693 * @brief memory map function of memory for vpu file operation
694 * @return 0 on success or negative error code on error
696 static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
698 size_t request_size = vm->vm_end - vm->vm_start;
700 dev_dbg(vpu_dev, "start=0x%08lx, pgoff=0x%08lx, size=%zx\n",
701 vm->vm_start, vm->vm_pgoff, request_size);
703 vm->vm_flags |= VM_IO;
704 vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
706 return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
707 request_size, vm->vm_page_prot) ? -EAGAIN : 0;
711 * @brief memory map function of vmalloced share memory
712 * @return 0 on success or negative error code on error
714 static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
718 ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
719 vm->vm_flags |= VM_IO;
723 * @brief memory map interface for vpu file operation
724 * @return 0 on success or negative error code on error
726 static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
728 unsigned long offset;
730 offset = (unsigned long)vshare_mem.cpu_addr >> PAGE_SHIFT;
732 if (vm->vm_pgoff && (vm->vm_pgoff == offset))
733 return vpu_map_vshare_mem(fp, vm);
734 else if (vm->vm_pgoff)
735 return vpu_map_dma_mem(fp, vm);
737 return vpu_map_hwregs(fp, vm);
740 static const struct file_operations vpu_fops = {
741 .owner = THIS_MODULE,
743 .unlocked_ioctl = vpu_ioctl,
744 .release = vpu_release,
745 .fasync = vpu_fasync,
/*
 * Per-SoC quirk tables matched via the device tree below.
 * NOTE(review): initializer bodies are partially missing from this
 * chunk; only the visible fields are documented.
 */
749 static const struct mxc_vpu_soc_data imx6dl_vpu_data = {
750 .regulator_required = 1,
755 static const struct mxc_vpu_soc_data imx6q_vpu_data = {
/* MX6Q RTL bug: MBC_SET_SUBBLK_EN needs re-init on reset (see resume) */
756 .quirk_subblk_en = 1,
757 .regulator_required = 1,
762 static const struct mxc_vpu_soc_data imx53_vpu_data = {
765 static const struct mxc_vpu_soc_data imx51_vpu_data = {
/* DT match table; .data points at the matching quirk set above. */
769 static const struct of_device_id vpu_of_match[] = {
770 { .compatible = "fsl,imx6dl-vpu", .data = &imx6dl_vpu_data, },
771 { .compatible = "fsl,imx6q-vpu", .data = &imx6q_vpu_data, },
772 { .compatible = "fsl,imx53-vpu", .data = &imx53_vpu_data, },
773 { .compatible = "fsl,imx51-vpu", .data = &imx51_vpu_data, },
776 MODULE_DEVICE_TABLE(of, vpu_of_match);
779 * This function is called by the driver framework to initialize the vpu device.
780 * @param dev The device structure for the vpu passed in by the framework.
781 * @return 0 on success or negative error code on error
/*
 * This function is called by the driver framework to initialize the vpu device.
 * Sets up driver state, optional IRAM, register mapping, char device,
 * class/device node, clock, IRQs, regulator and runtime PM.
 * NOTE(review): interior lines (error labels, some calls) are missing
 * from this chunk; comments describe only the visible code.
 * @param pdev The platform device for the vpu passed in by the framework.
 * @return 0 on success or negative error code on error
 */
783 static int vpu_dev_probe(struct platform_device *pdev)
786 struct device *temp_class;
787 struct resource *res;
788 unsigned long addr = 0;
789 struct device_node *np = pdev->dev.of_node;
791 struct vpu_priv *drv_data;
792 const struct of_device_id *of_id = of_match_device(vpu_of_match,
794 const struct mxc_vpu_soc_data *soc_data = of_id->data;
796 drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
797 if (drv_data == NULL)
800 drv_data->soc_data = soc_data;
801 mutex_init(&drv_data->lock);
803 init_waitqueue_head(&vpu_queue);
804 drv_data->workqueue = create_workqueue("vpu_wq");
805 INIT_WORK(&drv_data->work, vpu_worker_callback);
/* Optional on-chip IRAM carve-out described by the "iramsize" property. */
807 err = of_property_read_u32(np, "iramsize", &iramsize);
808 if (!err && iramsize) {
809 iram_pool = of_get_named_gen_pool(np, "iram", 0);
811 dev_err(&pdev->dev, "iram pool not available\n");
815 iram_base = gen_pool_alloc(iram_pool, iramsize);
817 dev_err(&pdev->dev, "unable to alloc iram\n");
821 addr = gen_pool_virt_to_phys(iram_pool, iram_base);
/* no IRAM: leave the range empty so the ioctl reports none */
825 iram.start = iram.end = 0;
828 iram.end = addr + iramsize - 1;
831 vpu_dev = &pdev->dev;
/* Map the VPU register block and remember its physical base for mmap. */
833 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
834 vpu_base = devm_ioremap_resource(&pdev->dev, res);
835 if (IS_ERR(vpu_base))
836 return PTR_ERR(vpu_base);
837 phy_vpu_base_addr = res->start;
/* vpu_major starts at 0, so this dynamically allocates a major. */
839 vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
841 dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
845 vpu_class = class_create(THIS_MODULE, "mxc_vpu");
846 if (IS_ERR(vpu_class)) {
847 err = PTR_ERR(vpu_class);
851 temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
853 if (IS_ERR(temp_class)) {
854 err = PTR_ERR(temp_class);
858 vpu_clk = clk_get(&pdev->dev, "vpu_clk");
859 if (IS_ERR(vpu_clk)) {
860 err = PTR_ERR(vpu_clk);
864 vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
865 if (vpu_ipi_irq < 0) {
866 dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
/* dev_id argument is on a missing line; presumably drv_data — verify */
870 err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
/* "pu" power-domain regulator; only mandatory on SoCs that flag it. */
875 vpu_regulator = devm_regulator_get(vpu_dev, "pu");
876 if (IS_ERR(vpu_regulator)) {
877 if (drv_data->soc_data->regulator_required) {
878 dev_err(vpu_dev, "failed to get vpu power\n");
881 /* regulator_get will return error on MX5x,
882 * just igore it everywhere
885 dev_warn(vpu_dev, "failed to get vpu power\n");
888 platform_set_drvdata(pdev, drv_data);
/* Extra JPEG-unit interrupt only on SoCs with a JPU. */
890 if (drv_data->soc_data->has_jpu) {
891 vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
892 if (vpu_jpu_irq < 0) {
893 dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
897 err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
898 "VPU_JPG_IRQ", drv_data);
903 pm_runtime_enable(&pdev->dev);
906 dev_info(vpu_dev, "VPU initialized\n");
/* error unwind labels (partially elided in this chunk) */
910 device_destroy(vpu_class, MKDEV(vpu_major, 0));
911 class_destroy(vpu_class);
913 unregister_chrdev(vpu_major, "mxc_vpu");
917 static int vpu_dev_remove(struct platform_device *pdev)
919 struct vpu_priv *vpu_data = platform_get_drvdata(pdev);
921 pm_runtime_disable(&pdev->dev);
923 free_irq(vpu_ipi_irq, &vpu_data);
924 #ifdef MXC_VPU_HAS_JPU
925 free_irq(vpu_jpu_irq, &vpu_data);
927 cancel_work_sync(&vpu_data->work);
928 flush_workqueue(vpu_data->workqueue);
929 destroy_workqueue(vpu_data->workqueue);
932 gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);
935 device_destroy(vpu_class, MKDEV(vpu_major, 0));
936 class_destroy(vpu_class);
937 unregister_chrdev(vpu_major, "mxc_vpu");
941 vpu_free_dma_buffer(&bitwork_mem);
942 vpu_free_dma_buffer(&pic_para_mem);
943 vpu_free_dma_buffer(&user_data_mem);
945 /* reset VPU state */
947 vpu_clk_enable(vpu_data);
949 vpu_clk_disable(vpu_data);
956 #ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend hook: waits (bounded) for the VPU to idle, saves
 * firmware-visible register state so vpu_resume() can restore it, then
 * gates the clock.
 * NOTE(review): interior lines are missing from this chunk; comments
 * describe only the visible code.
 */
957 static int vpu_suspend(struct device *dev)
959 struct vpu_priv *vpu_data = dev_get_drvdata(dev);
960 unsigned long timeout;
962 mutex_lock(&vpu_data->lock);
965 /* Wait for vpu go to idle state, suspect vpu cannot be changed
966 * to idle state after about 1 sec
968 timeout = jiffies + HZ;
969 while (READ_REG(BIT_BUSY_FLAG)) {
971 if (time_after(jiffies, timeout)) {
972 mutex_unlock(&vpu_data->lock);
/* MX53 needs no register save/restore — presumably state survives */
977 if (vpu_data->soc_data->is_mx53) {
978 mutex_unlock(&vpu_data->lock);
/* only meaningful once the firmware work buffer has been set up */
982 if (bitwork_mem.cpu_addr != 0) {
985 /* Save 64 registers from BIT_CODE_BUF_ADDR */
986 for (i = 0; i < 64; i++)
987 regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
988 pc_before_suspend = READ_REG(BIT_CUR_PC);
991 vpu_clk_disable(vpu_data);
992 /* If VPU is working before suspend, disable
993 * regulator to make usecount right.
998 mutex_unlock(&vpu_data->lock);
/*
 * System-sleep resume hook: restores the registers saved by
 * vpu_suspend(), re-downloads the firmware boot code from the bitwork
 * buffer and restarts the VPU if it was running before suspend.
 * NOTE(review): interior lines are missing from this chunk; comments
 * describe only the visible code.
 */
1002 static int vpu_resume(struct device *dev)
1005 struct vpu_priv *vpu_data = dev_get_drvdata(dev);
1007 mutex_lock(&vpu_data->lock);
/* MX53 path: nothing was saved at suspend, just re-enable the clock */
1010 if (vpu_data->soc_data->is_mx53) {
1011 vpu_clk_enable(vpu_data);
1015 /* If VPU is working before suspend, enable
1016 * regulator to make usecount right.
1020 if (bitwork_mem.cpu_addr != NULL) {
1021 u32 *p = bitwork_mem.cpu_addr;
1026 vpu_clk_enable(vpu_data);
/* a non-zero PC means the VPU was never powered off across suspend */
1028 pc = READ_REG(BIT_CUR_PC);
1030 dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);
1034 /* Restore registers */
1035 for (i = 0; i < 64; i++)
1036 WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));
1038 WRITE_REG(0x0, BIT_RESET_CTRL);
1039 WRITE_REG(0x0, BIT_CODE_RUN);
1041 /* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
1042 if (vpu_data->soc_data->quirk_subblk_en)
1043 WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
/*
1046 * Re-load boot code, from the codebuffer in external RAM.
1047 * Thankfully, we only need 4096 bytes, same for all platforms.
 */
1049 for (i = 0; i < 2048; i += 4) {
/* each 32-bit word holds two 16-bit code halves, pushed high then low */
1050 data = p[(i / 2) + 1];
1051 data_hi = (data >> 16) & 0xFFFF;
1052 data_lo = data & 0xFFFF;
1053 WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
1054 WRITE_REG(((i + 1) << 16) | data_lo,
1058 data_hi = (data >> 16) & 0xFFFF;
1059 data_lo = data & 0xFFFF;
1060 WRITE_REG(((i + 2) << 16) | data_hi,
1062 WRITE_REG(((i + 3) << 16) | data_lo,
/* restart the firmware only if it was running before suspend */
1066 if (pc_before_suspend) {
1067 WRITE_REG(0x1, BIT_BUSY_FLAG);
1068 WRITE_REG(0x1, BIT_CODE_RUN);
1069 while (READ_REG(BIT_BUSY_FLAG))
1072 dev_warn(vpu_dev, "PC=0 before suspend\n");
1077 mutex_unlock(&vpu_data->lock);
1081 static SIMPLE_DEV_PM_OPS(vpu_pm_ops, vpu_suspend, vpu_resume);
1082 #define VPU_PM_OPS &vpu_pm_ops
1084 #define VPU_PM_OPS NULL
1085 #endif /* !CONFIG_PM_SLEEP */
1087 /*! Driver definition
1090 static struct platform_driver mxcvpu_driver = {
1093 .of_match_table = vpu_of_match,
1096 .probe = vpu_dev_probe,
1097 .remove = vpu_dev_remove,
1100 module_platform_driver(mxcvpu_driver);
1102 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1103 MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
1104 MODULE_LICENSE("GPL");