2 * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
17 * @brief VPU system initialization and file operation implementation
22 #include <linux/kernel.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/stat.h>
27 #include <linux/platform_device.h>
28 #include <linux/kdev_t.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/wait.h>
31 #include <linux/list.h>
32 #include <linux/clk.h>
33 #include <linux/delay.h>
34 #include <linux/fsl_devices.h>
35 #include <linux/uaccess.h>
37 #include <linux/slab.h>
38 #include <linux/workqueue.h>
39 #include <linux/sched.h>
40 #include <linux/vmalloc.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/page-flags.h>
43 #include <linux/mm_types.h>
44 #include <linux/types.h>
45 #include <linux/memblock.h>
46 #include <linux/memory.h>
47 #include <linux/version.h>
48 #include <linux/module.h>
49 #include <linux/pm_runtime.h>
50 #include <linux/sizes.h>
51 #include <linux/genalloc.h>
53 #include <linux/of_device.h>
54 #include <linux/reset.h>
55 #include <linux/clk.h>
56 #include <linux/mxc_vpu.h>
58 /* Define one new pgprot which combined uncached and XN(never executable) */
/*
 * NOTE(review): L_PTE_* flags are ARM-specific page-table bits; this macro
 * only builds on arch/arm — confirm this file is gated to ARM builds.
 */
59 #define pgprot_noncachedxn(prot) \
60 __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
/*
 * Tail of struct vpu_priv (opening line not visible in this listing):
 * per-device state shared by all openers.
 */
63 struct fasync_struct *async_queue;
64 struct work_struct work;
65 struct workqueue_struct *workqueue;
66 	/* soc_data: per-SoC feature flags, set once at probe from OF match data */
67 const struct mxc_vpu_soc_data *soc_data;
/* Per-open-file state; allocated in vpu_open(), freed in vpu_release(). */
71 struct vpu_user_data {
72 struct vpu_priv *vpu_data;
76 /* To track the allocated memory buffer */
/* One node of the global mem_list; mem describes a DMA-coherent buffer. */
77 struct memalloc_record {
78 struct list_head list;
79 struct vpu_mem_desc mem;
/* Per-SoC capability bits (bitfields; remaining members not visible here). */
87 struct mxc_vpu_soc_data {
88 unsigned vpu_pwr_mgmnt:1,
/* On-chip IRAM pool handle and the base of the chunk reserved at probe. */
99 static struct gen_pool *iram_pool;
100 static u32 iram_base;
/* Global list of user-allocated DMA buffers (memalloc_record entries). */
102 static LIST_HEAD(mem_list);
/* Char-device identity; vpu_major==0 lets register_chrdev pick a major. */
104 static int vpu_major;
105 static struct class *vpu_class;
106 static struct vpu_priv *vpu_data;
/* Number of current openers; guarded by vpu_data->lock (see vpu_open). */
107 static u8 open_count;
108 static struct clk *vpu_clk;
/* Long-lived DMA buffers kept across opens (freed at release/remove). */
109 static struct vpu_mem_desc bitwork_mem;
110 static struct vpu_mem_desc pic_para_mem;
111 static struct vpu_mem_desc user_data_mem;
112 static struct vpu_mem_desc share_mem;
113 static struct vpu_mem_desc vshare_mem;
/* MMIO mapping of the VPU register block and its physical base. */
115 static void __iomem *vpu_base;
116 static int vpu_ipi_irq;
117 static u32 phy_vpu_base_addr;
119 static struct device *vpu_dev;
/* IRAM window handed to userspace via VPU_IOC_IRAM_SETTING. */
122 static struct iram_setting iram;
124 /* implement the blocking ioctl */
125 static int irq_status;
126 static int codec_done;
127 static wait_queue_head_t vpu_queue;
129 static int vpu_jpu_irq;
131 #ifdef CONFIG_PM_SLEEP
/* Register backup across suspend: 64 words from BIT_CODE_BUF_ADDR. */
132 static unsigned int regBk[64];
133 static unsigned int pc_before_suspend;
135 static struct regulator *vpu_regulator;
/* Relaxed MMIO accessors into the VPU register block. */
137 #define READ_REG(x) readl_relaxed(vpu_base + (x))
138 #define WRITE_REG(val, x) writel_relaxed(val, vpu_base + (x))
/*
 * Reference-counted clock enable: only the 0 -> 1 transition actually
 * prepares/enables vpu_clk. Caller must hold vpu_data->lock (all visible
 * call sites take it). Returns 0 or a clk_prepare_enable() error.
 */
140 static int vpu_clk_enable(struct vpu_priv *vpu_data)
/* Negative count means unbalanced enable/disable — refuse to proceed. */
142 if (WARN_ON(vpu_data->clk_enabled < 0))
145 if (vpu_data->clk_enabled++ == 0)
146 return clk_prepare_enable(vpu_clk);
/*
 * Counterpart of vpu_clk_enable(): only the 1 -> 0 transition gates the
 * clock off. WARNs (and bails) on underflow from unbalanced calls.
 */
151 static int vpu_clk_disable(struct vpu_priv *vpu_data)
153 if (WARN_ON(vpu_data->clk_enabled <= 0))
156 if (--vpu_data->clk_enabled == 0)
157 clk_disable_unprepare(vpu_clk);
/* Assert the VPU reset line via the reset controller bound to vpu_dev. */
161 static inline int vpu_reset(void)
163 return device_reset(vpu_dev);
/*
 * Enable the "pu" regulator if one was found at probe; a missing regulator
 * (IS_ERR) is tolerated because MX5x SoCs have none (see probe).
 */
166 static void vpu_power_up(void)
170 if (IS_ERR(vpu_regulator))
173 ret = regulator_enable(vpu_regulator);
175 dev_err(vpu_dev, "failed to power up vpu: %d\n", ret);
/* Mirror of vpu_power_up(): drop the "pu" regulator reference if present. */
178 static void vpu_power_down(void)
182 if (IS_ERR(vpu_regulator))
185 ret = regulator_disable(vpu_regulator);
187 dev_err(vpu_dev, "failed to power down vpu: %d\n", ret);
191 * Private function to alloc dma buffer
192 * @return status 0 success.
/*
 * Fills mem->cpu_addr (and presumably mem->phy_addr via the dma_handle
 * argument on the elided line 197 — confirm) with a coherent buffer of
 * PAGE_ALIGN(mem->size) bytes. GFP_DMA restricts it to the DMA zone.
 */
194 static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
196 mem->cpu_addr = dma_alloc_coherent(vpu_dev, PAGE_ALIGN(mem->size),
198 GFP_DMA | GFP_KERNEL);
199 dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = %p\n", mem->cpu_addr);
200 if (mem->cpu_addr == NULL) {
201 dev_err(vpu_dev, "Physical memory allocation error!\n");
208 * Private function to free dma buffer
/*
 * Releases a buffer from vpu_alloc_dma_buffer(); safe to call with an
 * already-NULL cpu_addr (no-op). Does NOT clear the descriptor — callers
 * that keep the descriptor around reset cpu_addr themselves.
 */
210 static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
212 if (mem->cpu_addr != NULL)
213 dma_free_coherent(vpu_dev, PAGE_ALIGN(mem->size),
214 mem->cpu_addr, mem->phy_addr);
218 * Private function to free buffers
219 * @return status 0 success.
/*
 * Walks the global mem_list freeing every recorded DMA buffer and
 * unlinking its record. NOTE(review): no mem_list lock is visible in this
 * fragment — confirm callers hold vpu_data->lock.
 */
221 static int vpu_free_buffers(void)
223 struct memalloc_record *rec, *n;
224 struct vpu_mem_desc mem;
226 list_for_each_entry_safe(rec, n, &mem_list, list) {
/* mem is a stack copy of rec->mem (copy on elided line 227). */
228 if (mem.cpu_addr != 0) {
229 vpu_free_dma_buffer(&mem);
230 dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
231 /* delete from list */
232 list_del(&rec->list);
/*
 * Bottom half queued by the IRQ handlers: notify async readers (SIGIO)
 * and wake anyone blocked in VPU_IOC_WAIT4INT.
 */
240 static inline void vpu_worker_callback(struct work_struct *w)
242 struct vpu_priv *dev = container_of(w, struct vpu_priv, work);
244 if (dev->async_queue)
245 kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
249 * Clock is gated on when dec/enc started, gate it off when
255 wake_up_interruptible(&vpu_queue);
259 * @brief vpu interrupt handler
/*
 * Hard IRQ: read the interrupt reason, ack it (BIT_INT_CLEAR), and defer
 * the userspace notification to the workqueue. MMIO is relaxed, so the
 * ack write ordering relies on the elided surrounding code — confirm.
 */
261 static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
263 struct vpu_priv *dev = dev_id;
266 reg = READ_REG(BIT_INT_REASON);
269 WRITE_REG(0x1, BIT_INT_CLEAR);
271 queue_work(dev->workqueue, &dev->work);
277 * @brief vpu jpu interrupt handler
/*
 * JPEG-unit IRQ: samples MJPEG_PIC_STATUS_REG and defers to the same
 * worker as the codec IRQ. Status ack (if any) is on elided lines.
 */
279 static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
281 struct vpu_priv *dev = dev_id;
284 reg = READ_REG(MJPEG_PIC_STATUS_REG);
288 queue_work(dev->workqueue, &dev->work);
294 * @brief open function for vpu file operation
296 * @return 0 on success or negative error code on error
/*
 * Allocates per-open state (freed in vpu_release via devm_kfree) and, for
 * the first opener only, takes a runtime-PM reference on the device.
 * open_count and the PM transition are serialized by vpu_data->lock.
 */
298 static int vpu_open(struct inode *inode, struct file *filp)
300 struct vpu_user_data *user_data = devm_kzalloc(vpu_dev,
303 if (user_data == NULL)
306 user_data->vpu_data = vpu_data;
308 mutex_lock(&vpu_data->lock);
310 if (open_count++ == 0) {
311 pm_runtime_get_sync(vpu_dev);
315 filp->private_data = user_data;
316 mutex_unlock(&vpu_data->lock);
321 * @brief IO ctrl function for vpu file operation
322 * @param cmd IO ctrl command
323 * @return 0 on success or negative error code on error
/*
 * Main ioctl dispatcher. All shared driver state (mem_list, share_mem,
 * vshare_mem, clock counters) is guarded by vpu_data->lock; per-command
 * notes below. arg is a user pointer for most commands, a plain value
 * for WAIT4INT.
 */
325 static long vpu_ioctl(struct file *filp, u_int cmd,
329 struct vpu_user_data *user_data = filp->private_data;
330 struct vpu_priv *vpu_data = user_data->vpu_data;
/* Allocate a DMA buffer for userspace and record it in mem_list. */
333 case VPU_IOC_PHYMEM_ALLOC:
335 struct memalloc_record *rec;
337 rec = kzalloc(sizeof(*rec), GFP_KERNEL);
341 if (copy_from_user(&rec->mem,
342 (struct vpu_mem_desc *)arg,
343 sizeof(struct vpu_mem_desc))) {
348 dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
351 ret = vpu_alloc_dma_buffer(&rec->mem);
/* Return the filled-in descriptor (phys/cpu addresses) to the caller. */
356 if (copy_to_user((void __user *)arg, &rec->mem,
357 sizeof(struct vpu_mem_desc))) {
362 mutex_lock(&vpu_data->lock);
363 list_add(&rec->list, &mem_list);
364 mutex_unlock(&vpu_data->lock);
/* Free a previously allocated buffer and drop its mem_list record. */
368 case VPU_IOC_PHYMEM_FREE:
370 struct memalloc_record *rec, *n;
371 struct vpu_mem_desc vpu_mem;
373 if (copy_from_user(&vpu_mem,
374 (struct vpu_mem_desc *)arg,
375 sizeof(struct vpu_mem_desc)))
378 dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = %p\n",
380 if (vpu_mem.cpu_addr != NULL)
381 vpu_free_dma_buffer(&vpu_mem);
383 mutex_lock(&vpu_data->lock)
384 list_for_each_entry_safe(rec, n, &mem_list, list) {
/* Match records by kernel virtual address supplied by userspace. */
385 if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
386 list_del(&rec->list);
391 mutex_unlock(&vpu_data->lock);
/* Block until an IRQ fires or `arg` milliseconds elapse. */
396 case VPU_IOC_WAIT4INT:
398 u_long timeout = arg;
400 ret = wait_event_interruptible_timeout(vpu_queue,
402 msecs_to_jiffies(timeout));
404 dev_warn(vpu_dev, "VPU blocking: timeout.\n");
406 } else if (signal_pending(current)) {
407 dev_warn(vpu_dev, "VPU interrupt received.\n");
/* Success: consume the pending irq_status and report 0. */
410 ret = irq_status = 0;
/* Hand the reserved IRAM window (start/end) to userspace. */
414 case VPU_IOC_IRAM_SETTING:
415 ret = copy_to_user((void __user *)arg, &iram,
416 sizeof(struct iram_setting));
/* Userspace clock gating: arg != 0 enables, 0 disables (refcounted
 * per open file via clk_enable_cnt on top of the device-wide count). */
421 case VPU_IOC_CLKGATE_SETTING:
425 if (get_user(clkgate_en, (u32 __user *)arg))
428 mutex_lock(&vpu_data->lock);
430 ret = vpu_clk_enable(vpu_data);
432 user_data->clk_enable_cnt++;
434 if (user_data->clk_enable_cnt == 0) {
437 if (--user_data->clk_enable_cnt == 0)
438 vpu_clk_disable(vpu_data);
442 mutex_unlock(&vpu_data->lock);
/* Lazily allocate the DMA share buffer on first request, then return
 * the same descriptor to every subsequent caller. */
445 case VPU_IOC_GET_SHARE_MEM:
446 mutex_lock(&vpu_data->lock);
447 if (share_mem.cpu_addr == NULL) {
448 if (copy_from_user(&share_mem,
449 (struct vpu_mem_desc *)arg,
450 sizeof(struct vpu_mem_desc))) {
451 mutex_unlock(&vpu_data->lock);
454 ret = vpu_alloc_dma_buffer(&share_mem);
456 mutex_unlock(&vpu_data->lock);
460 if (copy_to_user((void __user *)arg,
462 sizeof(struct vpu_mem_desc)))
466 mutex_unlock(&vpu_data->lock);
/* Same lazy-allocate pattern, but backed by vmalloc_user() so it can
 * be mmapped via remap_vmalloc_range (see vpu_map_vshare_mem). */
468 case VPU_IOC_REQ_VSHARE_MEM:
469 mutex_lock(&vpu_data->lock);
470 if (vshare_mem.cpu_addr == NULL) {
471 if (copy_from_user(&vshare_mem,
472 (struct vpu_mem_desc *)arg,
475 mutex_unlock(&vpu_data->lock);
478 vshare_mem.cpu_addr = vmalloc_user(vshare_mem.size);
479 if (vshare_mem.cpu_addr == NULL) {
480 mutex_unlock(&vpu_data->lock);
484 if (copy_to_user((void __user *)arg, &vshare_mem,
485 sizeof(struct vpu_mem_desc)))
489 mutex_unlock(&vpu_data->lock);
/* Lazily allocate the firmware working buffer (bitwork_mem).
 * NOTE(review): unlike the share-mem cases above, no lock is visible
 * around bitwork_mem here — confirm against the full source. */
491 case VPU_IOC_GET_WORK_ADDR:
492 if (bitwork_mem.cpu_addr == 0) {
493 if (copy_from_user(&bitwork_mem,
494 (struct vpu_mem_desc *)arg,
495 sizeof(struct vpu_mem_desc)))
498 ret = vpu_alloc_dma_buffer(&bitwork_mem);
502 if (copy_to_user((void __user *)arg,
511 * The following two ioctls are used when user allocates a working buffer
512 * and registers it to vpu driver.
514 case VPU_IOC_QUERY_BITWORK_MEM:
515 if (copy_to_user((void __user *)arg,
517 sizeof(struct vpu_mem_desc)))
522 case VPU_IOC_SET_BITWORK_MEM:
523 if (copy_from_user(&bitwork_mem,
524 (struct vpu_mem_desc *)arg,
525 sizeof(struct vpu_mem_desc)))
530 case VPU_IOC_SYS_SW_RESET:
/* Dump-style commands: bodies elided in this listing. */
533 case VPU_IOC_REG_DUMP:
534 case VPU_IOC_PHYMEM_DUMP:
/* Round-trips a descriptor with size forced to 1 — presumably an
 * "address is valid" probe for userspace; confirm intended semantics. */
537 case VPU_IOC_PHYMEM_CHECK:
539 struct vpu_mem_desc check_memory;
541 if (copy_from_user(&check_memory, (void __user *)arg,
542 sizeof(struct vpu_mem_desc)))
545 check_memory.size = 1;
546 if (copy_to_user((void __user *)arg, &check_memory,
547 sizeof(struct vpu_mem_desc)))
/* Userspace-driven device lock/unlock via vpu_data->lock. */
553 case VPU_IOC_LOCK_DEV:
557 if (get_user(lock_en, (u32 __user *)arg))
561 mutex_lock(&vpu_data->lock);
563 mutex_unlock(&vpu_data->lock);
568 dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
575 * @brief Release function for vpu file operation
576 * @return 0 on success or negative error code on error
/*
 * Last-close teardown: waits (bounded) for the VPU to go idle, flushes
 * the IRQ worker, frees the shared buffers, balances any clock refs the
 * closing file still holds, and drops the runtime-PM reference taken in
 * vpu_open(). All under vpu_data->lock.
 */
578 static int vpu_release(struct inode *inode, struct file *filp)
580 unsigned long timeout;
581 struct vpu_user_data *user_data = filp->private_data;
582 struct vpu_priv *vpu_data = user_data->vpu_data;
584 mutex_lock(&vpu_data->lock);
/* Only the final closer performs the device-wide teardown below. */
586 if (open_count > 0 && !--open_count) {
587 /* Wait for vpu go to idle state */
588 vpu_clk_enable(vpu_data);
/* Non-zero PC means firmware is loaded/running — poll busy flag. */
589 if (READ_REG(BIT_CUR_PC)) {
591 timeout = jiffies + HZ;
592 while (READ_REG(BIT_BUSY_FLAG)) {
594 if (time_after(jiffies, timeout)) {
595 dev_warn(vpu_dev, "VPU timeout during release\n");
600 /* Clean up interrupt */
601 cancel_work_sync(&vpu_data->work);
602 flush_workqueue(vpu_data->workqueue);
/* Still busy after the wait: on MX51/MX53 we must NOT gate or power
 * off a busy VPU, so bail out with the device left clocked. */
605 if (READ_REG(BIT_BUSY_FLAG)) {
606 if (vpu_data->soc_data->is_mx51 ||
607 vpu_data->soc_data->is_mx53) {
609 "fatal error: can't gate/power off when VPU is busy\n");
610 vpu_clk_disable(vpu_data);
611 mutex_unlock(&vpu_data->lock);
/* MX6: request a firmware sleep via the 0x10F0/0x10F4 mailbox
 * (0x11 = sleep request, 0x77 = acknowledged), bounded by 1s. */
614 if (vpu_data->soc_data->is_mx6dl ||
615 vpu_data->soc_data->is_mx6q) {
616 WRITE_REG(0x11, 0x10F0);
617 timeout = jiffies + HZ;
618 while (READ_REG(0x10F4) != 0x77) {
620 if (time_after(jiffies, timeout))
624 if (READ_REG(0x10F4) != 0x77) {
626 "fatal error: can't gate/power off when VPU is busy\n");
627 WRITE_REG(0x0, 0x10F0);
628 vpu_clk_disable(vpu_data);
629 mutex_unlock(&vpu_data->lock);
639 /* Free shared memory when vpu device is idle */
640 vpu_free_dma_buffer(&share_mem);
641 share_mem.cpu_addr = 0;
642 vfree(vshare_mem.cpu_addr);
643 vshare_mem.cpu_addr = 0;
/* Balance any clock reference this file still held via CLKGATE ioctl. */
645 if (user_data->clk_enable_cnt)
646 vpu_clk_disable(vpu_data);
/* Balances the vpu_clk_enable() at the top of this teardown path. */
648 vpu_clk_disable(vpu_data);
650 pm_runtime_put_sync_suspend(vpu_dev);
651 devm_kfree(vpu_dev, user_data);
653 mutex_unlock(&vpu_data->lock);
659 * @brief fasync function for vpu file operation
660 * @return 0 on success or negative error code on error
/* Standard fasync hook: register/unregister for SIGIO delivery
 * (raised from vpu_worker_callback via kill_fasync). */
662 static int vpu_fasync(int fd, struct file *filp, int mode)
664 struct vpu_user_data *user_data = filp->private_data;
665 struct vpu_priv *vpu_data = user_data->vpu_data;
666 return fasync_helper(fd, filp, mode, &vpu_data->async_queue);
670 * @brief memory map function of harware registers for vpu file operation
671 * @return 0 on success or negative error code on error
/*
 * mmap path for the VPU register block: uncached + XN mapping of
 * phy_vpu_base_addr for the requested VMA length.
 */
673 static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
677 vm->vm_flags |= VM_IO;
679 * Since vpu registers have been mapped with ioremap() at probe
680 * which L_PTE_XN is 1, and the same physical address must be
681 * mapped multiple times with same type, so set L_PTE_XN to 1 here.
682 * Otherwise, there may be unexpected result in video codec.
684 vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
685 pfn = phy_vpu_base_addr >> PAGE_SHIFT;
686 dev_dbg(vpu_dev, "size=0x%08lx, page no.=0x%08lx\n",
687 vm->vm_end - vm->vm_start, pfn);
688 return remap_pfn_range(vm, vm->vm_start, pfn,
689 vm->vm_end - vm->vm_start,
690 vm->vm_page_prot) ? -EAGAIN : 0;
694 * @brief memory map function of memory for vpu file operation
695 * @return 0 on success or negative error code on error
/*
 * mmap path for DMA buffers: vm_pgoff carries the physical pfn chosen by
 * userspace from a descriptor returned by VPU_IOC_PHYMEM_ALLOC; mapped
 * write-combined. NOTE(review): no validation that vm_pgoff belongs to a
 * recorded buffer is visible in this fragment.
 */
697 static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
699 size_t request_size = vm->vm_end - vm->vm_start;
701 dev_dbg(vpu_dev, "start=0x%08lx, pgoff=0x%08lx, size=%zx\n",
702 vm->vm_start, vm->vm_pgoff, request_size);
704 vm->vm_flags |= VM_IO;
705 vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
707 return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
708 request_size, vm->vm_page_prot) ? -EAGAIN : 0;
712 * @brief memory map function of vmalloced share memory
713 * @return 0 on success or negative error code on error
/* mmap path for vshare_mem: vm_pgoff encodes the vmalloc'ed kernel
 * virtual address (see vpu_mmap's offset computation). */
715 static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
719 ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
720 vm->vm_flags |= VM_IO;
724 * @brief memory map interface for vpu file operation
725 * @return 0 on success or negative error code on error
/*
 * mmap dispatcher keyed on vm_pgoff:
 *   pgoff == (vshare_mem.cpu_addr >> PAGE_SHIFT) -> vmalloc share buffer,
 *   any other non-zero pgoff                     -> DMA buffer by pfn,
 *   pgoff == 0                                   -> hardware registers.
 */
727 static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
729 unsigned long offset;
731 offset = (unsigned long)vshare_mem.cpu_addr >> PAGE_SHIFT;
733 if (vm->vm_pgoff && (vm->vm_pgoff == offset))
734 return vpu_map_vshare_mem(fp, vm);
735 else if (vm->vm_pgoff)
736 return vpu_map_dma_mem(fp, vm);
738 return vpu_map_hwregs(fp, vm);
/* Character-device operations (open/mmap entries on elided lines). */
741 static const struct file_operations vpu_fops = {
742 .owner = THIS_MODULE,
744 .unlocked_ioctl = vpu_ioctl,
745 .release = vpu_release,
746 .fasync = vpu_fasync,
/* Per-SoC capability tables referenced by the OF match data below. */
750 static const struct mxc_vpu_soc_data imx6dl_vpu_data = {
751 .regulator_required = 1,
/* i.MX6Q additionally needs the MBC_SET_SUBBLK_EN reset quirk (resume). */
756 static const struct mxc_vpu_soc_data imx6q_vpu_data = {
757 .quirk_subblk_en = 1,
758 .regulator_required = 1,
764 static const struct mxc_vpu_soc_data imx53_vpu_data = {
767 static const struct mxc_vpu_soc_data imx51_vpu_data = {
/* Device-tree match table; .data selects the soc_data above. */
771 static const struct of_device_id vpu_of_match[] = {
772 { .compatible = "fsl,imx6dl-vpu", .data = &imx6dl_vpu_data, },
773 { .compatible = "fsl,imx6q-vpu", .data = &imx6q_vpu_data, },
774 { .compatible = "fsl,imx53-vpu", .data = &imx53_vpu_data, },
775 { .compatible = "fsl,imx51-vpu", .data = &imx51_vpu_data, },
778 MODULE_DEVICE_TABLE(of, vpu_of_match);
781 * This function is called by the driver framework to initialize the vpu device.
782 * @param dev The device structure for the vpu passed in by the framework.
783 * @return 0 on success or negative error code on error
/*
 * Probe order: alloc driver state -> workqueue -> optional IRAM carve-out
 * -> map registers -> char device + class/device node -> clock -> codec
 * IRQ -> regulator (optional on MX5x) -> optional JPU IRQ -> runtime PM.
 * Error labels at the bottom unwind device node / class / chrdev.
 */
785 static int vpu_dev_probe(struct platform_device *pdev)
788 struct device *temp_class;
789 struct resource *res;
790 struct device_node *np = pdev->dev.of_node;
791 struct vpu_priv *drv_data;
792 const struct of_device_id *of_id = of_match_device(vpu_of_match,
794 const struct mxc_vpu_soc_data *soc_data = of_id->data;
796 drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
797 if (drv_data == NULL)
800 drv_data->soc_data = soc_data;
801 mutex_init(&drv_data->lock);
803 init_waitqueue_head(&vpu_queue);
804 drv_data->workqueue = create_workqueue("vpu_wq");
805 INIT_WORK(&drv_data->work, vpu_worker_callback);
/* Reserve on-chip IRAM for the codec when the SoC provides it. */
807 if (soc_data->iramsize) {
808 iram_pool = of_get_named_gen_pool(np, "iram", 0);
810 dev_err(&pdev->dev, "iram pool not available\n");
814 iram_base = gen_pool_alloc(iram_pool, soc_data->iramsize);
816 dev_err(&pdev->dev, "unable to alloc iram\n");
820 iram.start = gen_pool_virt_to_phys(iram_pool, iram_base);
821 iram.end = iram.start + soc_data->iramsize - 1;
824 vpu_dev = &pdev->dev;
826 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
827 vpu_base = devm_ioremap_resource(&pdev->dev, res);
828 if (IS_ERR(vpu_base))
829 return PTR_ERR(vpu_base);
830 phy_vpu_base_addr = res->start;
/* vpu_major is 0 initially, so the kernel assigns a free major. */
832 vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
834 dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
838 vpu_class = class_create(THIS_MODULE, "mxc_vpu");
839 if (IS_ERR(vpu_class)) {
840 err = PTR_ERR(vpu_class);
844 temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
846 if (IS_ERR(temp_class)) {
847 err = PTR_ERR(temp_class);
851 vpu_clk = clk_get(&pdev->dev, "vpu_clk");
852 if (IS_ERR(vpu_clk)) {
853 err = PTR_ERR(vpu_clk);
857 vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
858 if (vpu_ipi_irq < 0) {
859 dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
863 err = devm_request_irq(vpu_dev, vpu_ipi_irq, vpu_ipi_irq_handler, 0,
864 "VPU_CODEC_IRQ", drv_data);
/* Regulator is mandatory only where soc_data says so (i.MX6). */
868 vpu_regulator = devm_regulator_get(vpu_dev, "pu");
869 if (IS_ERR(vpu_regulator)) {
870 if (drv_data->soc_data->regulator_required) {
871 dev_err(vpu_dev, "failed to get vpu power\n");
874 /* regulator_get will return error on MX5x,
875 * just igore it everywhere
877 dev_warn(vpu_dev, "failed to get vpu power\n");
881 platform_set_drvdata(pdev, drv_data);
883 if (drv_data->soc_data->has_jpu) {
884 vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
885 if (vpu_jpu_irq < 0) {
886 dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
890 err = devm_request_irq(vpu_dev, vpu_jpu_irq,
891 vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
892 "VPU_JPG_IRQ", drv_data);
897 pm_runtime_enable(&pdev->dev);
900 dev_info(vpu_dev, "VPU initialized\n");
/* Error unwind labels (goto targets from the paths above). */
904 device_destroy(vpu_class, MKDEV(vpu_major, 0));
905 class_destroy(vpu_class);
907 unregister_chrdev(vpu_major, "mxc_vpu");
/*
 * Driver removal: reverse of probe — disable runtime PM, drain and destroy
 * the workqueue, return the IRAM carve-out, remove the device node and
 * char device, free long-lived DMA buffers, and reset the VPU under a
 * balanced clock enable/disable.
 */
911 static int vpu_dev_remove(struct platform_device *pdev)
913 struct vpu_priv *vpu_data = platform_get_drvdata(pdev);
915 pm_runtime_disable(&pdev->dev);
917 cancel_work_sync(&vpu_data->work);
918 flush_workqueue(vpu_data->workqueue);
919 destroy_workqueue(vpu_data->workqueue);
922 gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);
925 device_destroy(vpu_class, MKDEV(vpu_major, 0));
926 class_destroy(vpu_class);
927 unregister_chrdev(vpu_major, "mxc_vpu");
931 vpu_free_dma_buffer(&bitwork_mem);
932 vpu_free_dma_buffer(&pic_para_mem);
933 vpu_free_dma_buffer(&user_data_mem);
935 /* reset VPU state */
937 vpu_clk_enable(vpu_data);
939 vpu_clk_disable(vpu_data);
946 #ifdef CONFIG_PM_SLEEP
/*
 * System suspend: wait (bounded, ~1s) for the VPU to go idle, then — except
 * on MX53, which skips the register save — back up 64 registers starting at
 * BIT_CODE_BUF_ADDR plus the program counter so vpu_resume() can restore
 * the firmware state. Aborts suspend if the VPU stays busy past the timeout.
 */
947 static int vpu_suspend(struct device *dev)
949 struct vpu_priv *vpu_data = dev_get_drvdata(dev);
950 unsigned long timeout;
952 mutex_lock(&vpu_data->lock);
955 /* Wait for vpu go to idle state, suspect vpu cannot be changed
956 * to idle state after about 1 sec
958 timeout = jiffies + HZ;
959 while (READ_REG(BIT_BUSY_FLAG)) {
961 if (time_after(jiffies, timeout)) {
962 mutex_unlock(&vpu_data->lock);
/* MX53: no register save/restore needed — done here. */
967 if (vpu_data->soc_data->is_mx53) {
968 mutex_unlock(&vpu_data->lock);
/* Only meaningful if firmware was ever loaded (bitwork_mem allocated). */
972 if (bitwork_mem.cpu_addr != 0) {
975 /* Save 64 registers from BIT_CODE_BUF_ADDR */
976 for (i = 0; i < 64; i++)
977 regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
978 pc_before_suspend = READ_REG(BIT_CUR_PC);
981 vpu_clk_disable(vpu_data);
982 /* If VPU is working before suspend, disable
983 * regulator to make usecount right.
988 mutex_unlock(&vpu_data->lock);
/*
 * System resume: MX53 needs only a clock kick; other SoCs restore the 64
 * saved registers, re-download the 4KiB boot code from bitwork_mem into
 * BIT_CODE_DOWN (16-bit halves, high half first), apply the i.MX6
 * MBC_SET_SUBBLK_EN reset quirk, and restart the firmware if it was
 * running before suspend (pc_before_suspend != 0).
 */
992 static int vpu_resume(struct device *dev)
995 struct vpu_priv *vpu_data = dev_get_drvdata(dev);
997 mutex_lock(&vpu_data->lock);
1000 if (vpu_data->soc_data->is_mx53) {
1001 vpu_clk_enable(vpu_data);
1005 /* If VPU is working before suspend, enable
1006 * regulator to make usecount right.
/* Restore path runs only if firmware had been loaded before suspend. */
1010 if (bitwork_mem.cpu_addr != NULL) {
1011 u32 *p = bitwork_mem.cpu_addr;
1016 vpu_clk_enable(vpu_data);
/* If PC is non-zero the VPU kept power across suspend — skip restore. */
1018 pc = READ_REG(BIT_CUR_PC);
1020 dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);
1024 /* Restore registers */
1025 for (i = 0; i < 64; i++)
1026 WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));
1028 WRITE_REG(0x0, BIT_RESET_CTRL);
1029 WRITE_REG(0x0, BIT_CODE_RUN);
1031 /* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
1032 if (vpu_data->soc_data->quirk_subblk_en)
1033 WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
1036 * Re-load boot code, from the codebuffer in external RAM.
1037 * Thankfully, we only need 4096 bytes, same for all platforms.
/* Each 32-bit word of code feeds two 16-bit BIT_CODE_DOWN writes,
 * tagged with the destination index in the upper 16 bits. */
1039 for (i = 0; i < 2048; i += 4) {
1040 data = p[(i / 2) + 1];
1041 data_hi = (data >> 16) & 0xFFFF;
1042 data_lo = data & 0xFFFF;
1043 WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
1044 WRITE_REG(((i + 1) << 16) | data_lo,
1048 data_hi = (data >> 16) & 0xFFFF;
1049 data_lo = data & 0xFFFF;
1050 WRITE_REG(((i + 2) << 16) | data_hi,
1052 WRITE_REG(((i + 3) << 16) | data_lo,
/* Restart firmware only if it was actually running before suspend. */
1056 if (pc_before_suspend) {
1057 WRITE_REG(0x1, BIT_BUSY_FLAG);
1058 WRITE_REG(0x1, BIT_CODE_RUN);
/* NOTE(review): unbounded busy-wait here, unlike the 1s-bounded
 * loops elsewhere — confirm against the full source. */
1059 while (READ_REG(BIT_BUSY_FLAG))
1062 dev_warn(vpu_dev, "PC=0 before suspend\n");
1067 mutex_unlock(&vpu_data->lock);
/* PM ops exist only under CONFIG_PM_SLEEP; otherwise NULL is plugged in. */
1071 static SIMPLE_DEV_PM_OPS(vpu_pm_ops, vpu_suspend, vpu_resume);
1072 #define VPU_PM_OPS &vpu_pm_ops
1074 #define VPU_PM_OPS NULL
1075 #endif /* !CONFIG_PM_SLEEP */
1077 /*! Driver definition
/* Platform driver glue; probe/remove defined above, OF table selects
 * per-SoC data. module_platform_driver() generates init/exit. */
1080 static struct platform_driver mxcvpu_driver = {
1083 .of_match_table = vpu_of_match,
1086 .probe = vpu_dev_probe,
1087 .remove = vpu_dev_remove,
1090 module_platform_driver(mxcvpu_driver);
1092 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1093 MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
1094 MODULE_LICENSE("GPL");