2 * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
17 * @brief VPU system initialization and file operation implementation
22 #include <linux/kernel.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/stat.h>
27 #include <linux/platform_device.h>
28 #include <linux/kdev_t.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/wait.h>
31 #include <linux/list.h>
32 #include <linux/clk.h>
33 #include <linux/delay.h>
34 #include <linux/fsl_devices.h>
35 #include <linux/uaccess.h>
37 #include <linux/slab.h>
38 #include <linux/workqueue.h>
39 #include <linux/sched.h>
40 #include <linux/vmalloc.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/page-flags.h>
43 #include <linux/mm_types.h>
44 #include <linux/types.h>
45 #include <linux/memblock.h>
46 #include <linux/memory.h>
47 #include <linux/version.h>
48 #include <linux/module.h>
49 #include <linux/pm_runtime.h>
50 #include <linux/sizes.h>
51 #include <linux/genalloc.h>
53 #include <linux/of_device.h>
54 #include <linux/reset.h>
55 #include <linux/clk.h>
56 #include <linux/mxc_vpu.h>
58 /* Define one new pgprot which combined uncached and XN(never executable) */
59 #define pgprot_noncachedxn(prot) \
60 __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
/*
 * NOTE(review): this view of the file is missing lines; the struct
 * declarations below appear without some of their opening/closing braces.
 */
/* Fields of the per-device driver state (struct vpu_priv). */
63 struct fasync_struct *async_queue;
64 struct work_struct work;
65 struct workqueue_struct *workqueue;
67 const struct mxc_vpu_soc_data *soc_data;
/* Per-open-file state; links each file handle back to the driver data. */
71 struct vpu_user_data {
72 struct vpu_priv *vpu_data;
76 /* To track the allocated memory buffer */
77 struct memalloc_record {
78 struct list_head list;
79 struct vpu_mem_desc mem;
/* Per-SoC feature flags (bitfields; full list not visible here). */
87 struct mxc_vpu_soc_data {
88 unsigned vpu_pwr_mgmnt:1,
/* On-chip IRAM pool used for the VPU working area, if configured in DT. */
98 static struct gen_pool *iram_pool;
/* List of user-allocated DMA buffers; manipulated under vpu_data->lock. */
101 static LIST_HEAD(mem_list);
103 static int vpu_major;
104 static struct class *vpu_class;
105 static struct vpu_priv *vpu_data;
106 static u8 open_count;
107 static struct clk *vpu_clk;
/* Well-known shared buffers handed out through the ioctl interface. */
108 static struct vpu_mem_desc bitwork_mem;
109 static struct vpu_mem_desc pic_para_mem;
110 static struct vpu_mem_desc user_data_mem;
111 static struct vpu_mem_desc share_mem;
112 static struct vpu_mem_desc vshare_mem;
/* MMIO mapping of the VPU register block and its physical base address. */
114 static void __iomem *vpu_base;
115 static int vpu_ipi_irq;
116 static u32 phy_vpu_base_addr;
118 static struct device *vpu_dev;
121 static struct iram_setting iram;
123 /* implement the blocking ioctl */
124 static int irq_status;
125 static int codec_done;
126 static wait_queue_head_t vpu_queue;
128 static int vpu_jpu_irq;
130 #ifdef CONFIG_PM_SLEEP
/* Register backup across suspend/resume (see vpu_suspend/vpu_resume). */
131 static unsigned int regBk[64];
132 static unsigned int pc_before_suspend;
134 static struct regulator *vpu_regulator;
/* Relaxed MMIO accessors into the VPU register window. */
136 #define READ_REG(x) readl_relaxed(vpu_base + (x))
137 #define WRITE_REG(val, x) writel_relaxed(val, vpu_base + (x))
/*
 * Reference-counted clock enable: the hardware clock is only
 * prepared/enabled on the 0 -> 1 transition of clk_enabled.
 * Returns 0 or the clk_prepare_enable() error code.
 */
139 static int vpu_clk_enable(struct vpu_priv *vpu_data)
141 if (WARN_ON(vpu_data->clk_enabled < 0))
144 if (vpu_data->clk_enabled++ == 0)
145 return clk_prepare_enable(vpu_clk);
/*
 * Reference-counted clock disable: the hardware clock is gated only
 * on the 1 -> 0 transition; an underflow triggers a WARN instead.
 */
150 static int vpu_clk_disable(struct vpu_priv *vpu_data)
152 if (WARN_ON(vpu_data->clk_enabled <= 0))
155 if (--vpu_data->clk_enabled == 0)
156 clk_disable_unprepare(vpu_clk);
/* Pulse the VPU reset line through the reset-controller framework. */
160 static inline int vpu_reset(void)
162 return device_reset(vpu_dev);
/*
 * Enable the VPU power-domain regulator.  A regulator that failed to
 * probe (IS_ERR) is treated as "nothing to do" — presumably the SoCs
 * without a dedicated PU regulator; errors are only logged.
 */
165 static void vpu_power_up(void)
169 if (IS_ERR(vpu_regulator))
172 ret = regulator_enable(vpu_regulator);
174 dev_err(vpu_dev, "failed to power up vpu: %d\n", ret);
/* Counterpart of vpu_power_up(): disable the regulator, log failures. */
177 static void vpu_power_down(void)
181 if (IS_ERR(vpu_regulator))
184 ret = regulator_disable(vpu_regulator);
186 dev_err(vpu_dev, "failed to power down vpu: %d\n", ret);
190 * Private function to alloc dma buffer
191 * @return status 0 success.
/*
 * Allocates a page-aligned coherent DMA buffer of mem->size bytes;
 * the CPU address is stored in mem->cpu_addr (the physical-address
 * argument to dma_alloc_coherent() is on a line not visible here).
 */
193 static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
195 mem->cpu_addr = dma_alloc_coherent(vpu_dev, PAGE_ALIGN(mem->size),
197 GFP_DMA | GFP_KERNEL);
198 dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = %p\n", mem->cpu_addr);
199 if (mem->cpu_addr == NULL) {
200 dev_err(vpu_dev, "Physical memory allocation error!\n");
207 * Private function to free dma buffer
/* Frees a buffer from vpu_alloc_dma_buffer(); NULL cpu_addr is a no-op. */
209 static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
211 if (mem->cpu_addr != NULL)
212 dma_free_coherent(vpu_dev, PAGE_ALIGN(mem->size),
213 mem->cpu_addr, mem->phy_addr);
217 * Private function to free buffers
218 * @return status 0 success.
/*
 * Walks mem_list, releasing every recorded DMA allocation and removing
 * its record.  NOTE(review): no locking is visible in this view —
 * presumably callers hold vpu_data->lock; confirm before changing.
 */
220 static int vpu_free_buffers(void)
222 struct memalloc_record *rec, *n;
223 struct vpu_mem_desc mem;
225 list_for_each_entry_safe(rec, n, &mem_list, list) {
227 if (mem.cpu_addr != 0) {
228 vpu_free_dma_buffer(&mem);
229 dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
230 /* delete from list */
231 list_del(&rec->list);
/*
 * Workqueue callback scheduled from the interrupt handlers: notifies
 * userspace via SIGIO if a fasync queue is registered.
 */
239 static inline void vpu_worker_callback(struct work_struct *w)
241 struct vpu_priv *dev = container_of(w, struct vpu_priv, work);
243 if (dev->async_queue)
244 kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
248 * Clock is gated on when dec/enc started, gate it off when
254 wake_up_interruptible(&vpu_queue);
258 * @brief vpu interrupt handler
/*
 * Reads the interrupt reason, acknowledges it in BIT_INT_CLEAR, then
 * defers userspace notification to the workqueue.  (Some handler lines,
 * e.g. the IRQ_HANDLED return, are not visible in this view.)
 */
260 static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
262 struct vpu_priv *dev = dev_id;
265 reg = READ_REG(BIT_INT_REASON);
268 WRITE_REG(0x1, BIT_INT_CLEAR);
270 queue_work(dev->workqueue, &dev->work);
276 * @brief vpu jpu interrupt handler
/*
 * JPEG-unit interrupt: reads MJPEG_PIC_STATUS_REG and schedules the
 * same notification work as the main VPU interrupt.
 */
278 static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
280 struct vpu_priv *dev = dev_id;
283 reg = READ_REG(MJPEG_PIC_STATUS_REG);
287 queue_work(dev->workqueue, &dev->work);
293 * @brief open function for vpu file operation
295 * @return 0 on success or negative error code on error
297 static int vpu_open(struct inode *inode, struct file *filp)
/* Per-open bookkeeping; released via devm_kfree() in vpu_release(). */
299 struct vpu_user_data *user_data = devm_kzalloc(vpu_dev,
302 if (user_data == NULL)
305 user_data->vpu_data = vpu_data;
307 mutex_lock(&vpu_data->lock);
/* First opener powers the device up through runtime PM. */
309 if (open_count++ == 0) {
310 pm_runtime_get_sync(vpu_dev);
314 filp->private_data = user_data;
315 mutex_unlock(&vpu_data->lock);
320 * @brief IO ctrl function for vpu file operation
321 * @param cmd IO ctrl command
322 * @return 0 on success or negative error code on error
/*
 * NOTE(review): several lines of this ioctl dispatcher (braces, breaks,
 * some error paths) are missing from this view; comments below describe
 * only what the visible lines establish.
 */
324 static long vpu_ioctl(struct file *filp, u_int cmd,
328 struct vpu_user_data *user_data = filp->private_data;
329 struct vpu_priv *vpu_data = user_data->vpu_data;
/* Allocate a coherent DMA buffer for userspace and track it in mem_list. */
332 case VPU_IOC_PHYMEM_ALLOC:
334 struct memalloc_record *rec;
336 rec = kzalloc(sizeof(*rec), GFP_KERNEL);
340 if (copy_from_user(&rec->mem,
341 (struct vpu_mem_desc *)arg,
342 sizeof(struct vpu_mem_desc))) {
347 dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
350 ret = vpu_alloc_dma_buffer(&rec->mem);
/* Hand the filled-in descriptor (addresses) back to userspace. */
355 if (copy_to_user((void __user *)arg, &rec->mem,
356 sizeof(struct vpu_mem_desc))) {
361 mutex_lock(&vpu_data->lock);
362 list_add(&rec->list, &mem_list);
363 mutex_unlock(&vpu_data->lock);
/* Free a previously-allocated buffer and drop its tracking record. */
367 case VPU_IOC_PHYMEM_FREE:
369 struct memalloc_record *rec, *n;
370 struct vpu_mem_desc vpu_mem;
372 if (copy_from_user(&vpu_mem,
373 (struct vpu_mem_desc *)arg,
374 sizeof(struct vpu_mem_desc)))
377 dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = %p\n",
379 if (vpu_mem.cpu_addr != NULL)
380 vpu_free_dma_buffer(&vpu_mem);
382 mutex_lock(&vpu_data->lock);
383 list_for_each_entry_safe(rec, n, &mem_list, list) {
384 if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
385 list_del(&rec->list);
390 mutex_unlock(&vpu_data->lock);
/* Block until a codec interrupt arrives or the timeout (ms) expires. */
394 case VPU_IOC_WAIT4INT:
396 u_long timeout = arg;
398 ret = wait_event_interruptible_timeout(vpu_queue,
400 msecs_to_jiffies(timeout));
402 dev_warn(vpu_dev, "VPU blocking: timeout.\n");
404 } else if (signal_pending(current)) {
405 dev_warn(vpu_dev, "VPU interrupt received.\n");
/* Report the IRAM region reserved at probe time to userspace. */
412 case VPU_IOC_IRAM_SETTING:
413 ret = copy_to_user((void __user *)arg, &iram,
414 sizeof(struct iram_setting));
/* Userspace-driven clock gating, counted per open file. */
419 case VPU_IOC_CLKGATE_SETTING:
423 if (get_user(clkgate_en, (u32 __user *)arg))
426 mutex_lock(&vpu_data->lock);
428 ret = vpu_clk_enable(vpu_data);
430 user_data->clk_enable_cnt++;
432 if (user_data->clk_enable_cnt == 0) {
435 if (--user_data->clk_enable_cnt == 0)
436 vpu_clk_disable(vpu_data);
440 mutex_unlock(&vpu_data->lock);
/* Lazily allocate the shared DMA buffer, then return its descriptor. */
443 case VPU_IOC_GET_SHARE_MEM:
444 mutex_lock(&vpu_data->lock);
445 if (share_mem.cpu_addr == NULL) {
446 if (copy_from_user(&share_mem,
447 (struct vpu_mem_desc *)arg,
448 sizeof(struct vpu_mem_desc))) {
449 mutex_unlock(&vpu_data->lock);
452 ret = vpu_alloc_dma_buffer(&share_mem);
454 mutex_unlock(&vpu_data->lock);
458 if (copy_to_user((void __user *)arg,
460 sizeof(struct vpu_mem_desc)))
464 mutex_unlock(&vpu_data->lock);
/* Same pattern for the vmalloc'ed share buffer (mmap'ed via vpu_mmap). */
466 case VPU_IOC_REQ_VSHARE_MEM:
467 mutex_lock(&vpu_data->lock);
468 if (vshare_mem.cpu_addr == NULL) {
469 if (copy_from_user(&vshare_mem,
470 (struct vpu_mem_desc *)arg,
473 mutex_unlock(&vpu_data->lock);
476 vshare_mem.cpu_addr = vmalloc_user(vshare_mem.size);
477 if (vshare_mem.cpu_addr == NULL) {
478 mutex_unlock(&vpu_data->lock);
482 if (copy_to_user((void __user *)arg, &vshare_mem,
483 sizeof(struct vpu_mem_desc)))
487 mutex_unlock(&vpu_data->lock);
/* Lazily allocate the firmware working buffer and return it.
 * NOTE(review): unlike the cases above, no lock is visible here. */
489 case VPU_IOC_GET_WORK_ADDR:
490 if (bitwork_mem.cpu_addr == 0) {
491 if (copy_from_user(&bitwork_mem,
492 (struct vpu_mem_desc *)arg,
493 sizeof(struct vpu_mem_desc)))
496 ret = vpu_alloc_dma_buffer(&bitwork_mem);
500 if (copy_to_user((void __user *)arg,
509 * The following two ioctls are used when user allocates a working buffer
510 * and registers it to vpu driver.
512 case VPU_IOC_QUERY_BITWORK_MEM:
513 if (copy_to_user((void __user *)arg,
515 sizeof(struct vpu_mem_desc)))
520 case VPU_IOC_SET_BITWORK_MEM:
521 if (copy_from_user(&bitwork_mem,
522 (struct vpu_mem_desc *)arg,
523 sizeof(struct vpu_mem_desc)))
528 case VPU_IOC_SYS_SW_RESET:
531 case VPU_IOC_REG_DUMP:
532 case VPU_IOC_PHYMEM_DUMP:
/* Round-trip a descriptor to let userspace probe that the fd is alive;
 * size is overwritten with 1 as the "checked" marker. */
535 case VPU_IOC_PHYMEM_CHECK:
537 struct vpu_mem_desc check_memory;
539 if (copy_from_user(&check_memory, (void __user *)arg,
540 sizeof(struct vpu_mem_desc)))
543 check_memory.size = 1;
544 if (copy_to_user((void __user *)arg, &check_memory,
545 sizeof(struct vpu_mem_desc)))
/* Userspace-controlled lock/unlock of the driver mutex. */
551 case VPU_IOC_LOCK_DEV:
555 if (get_user(lock_en, (u32 __user *)arg))
559 mutex_lock(&vpu_data->lock);
561 mutex_unlock(&vpu_data->lock);
566 dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
572 * @brief Release function for vpu file operation
573 * @return 0 on success or negative error code on error
575 static int vpu_release(struct inode *inode, struct file *filp)
577 unsigned long timeout;
578 struct vpu_user_data *user_data = filp->private_data;
579 struct vpu_priv *vpu_data = user_data->vpu_data;
581 mutex_lock(&vpu_data->lock);
/* Last closer: drain the hardware, free shared buffers, power down. */
583 if (open_count > 0 && !--open_count) {
584 /* Wait for vpu go to idle state */
585 vpu_clk_enable(vpu_data);
586 if (READ_REG(BIT_CUR_PC)) {
/* Poll the busy flag for up to ~1s before giving up. */
588 timeout = jiffies + HZ;
589 while (READ_REG(BIT_BUSY_FLAG)) {
591 if (time_after(jiffies, timeout)) {
592 dev_warn(vpu_dev, "VPU timeout during release\n");
597 /* Clean up interrupt */
598 cancel_work_sync(&vpu_data->work);
599 flush_workqueue(vpu_data->workqueue);
/* On i.MX51/53 a busy VPU cannot be safely gated — bail out. */
602 if (READ_REG(BIT_BUSY_FLAG)) {
603 if (vpu_data->soc_data->is_mx51 ||
604 vpu_data->soc_data->is_mx53) {
606 "fatal error: can't gate/power off when VPU is busy\n");
607 vpu_clk_disable(vpu_data);
608 mutex_unlock(&vpu_data->lock);
/* On i.MX6 request a sleep handshake via registers 0x10F0/0x10F4
 * (magic values from the vendor BSP) and poll for the 0x77 ack. */
611 if (vpu_data->soc_data->is_mx6dl ||
612 vpu_data->soc_data->is_mx6q) {
613 WRITE_REG(0x11, 0x10F0);
614 timeout = jiffies + HZ;
615 while (READ_REG(0x10F4) != 0x77) {
617 if (time_after(jiffies, timeout))
621 if (READ_REG(0x10F4) != 0x77) {
623 "fatal error: can't gate/power off when VPU is busy\n");
624 WRITE_REG(0x0, 0x10F0);
625 vpu_clk_disable(vpu_data);
626 mutex_unlock(&vpu_data->lock);
636 /* Free shared memory when vpu device is idle */
637 vpu_free_dma_buffer(&share_mem);
638 share_mem.cpu_addr = 0;
639 vfree(vshare_mem.cpu_addr);
640 vshare_mem.cpu_addr = 0;
/* Drop any clock reference this file handle still holds. */
642 if (user_data->clk_enable_cnt)
643 vpu_clk_disable(vpu_data);
645 vpu_clk_disable(vpu_data);
647 pm_runtime_put_sync_suspend(vpu_dev);
648 devm_kfree(vpu_dev, user_data);
650 mutex_unlock(&vpu_data->lock);
656 * @brief fasync function for vpu file operation
657 * @return 0 on success or negative error code on error
/* Registers/unregisters this fd on the SIGIO queue used by the worker. */
659 static int vpu_fasync(int fd, struct file *filp, int mode)
661 struct vpu_user_data *user_data = filp->private_data;
662 struct vpu_priv *vpu_data = user_data->vpu_data;
663 return fasync_helper(fd, filp, mode, &vpu_data->async_queue);
667 * @brief memory map function of hardware registers for vpu file operation
668 * @return 0 on success or negative error code on error
/* Maps the VPU register block into userspace, uncached + non-executable. */
670 static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
674 vm->vm_flags |= VM_IO;
676 * Since vpu registers have been mapped with ioremap() at probe
677 * which L_PTE_XN is 1, and the same physical address must be
678 * mapped multiple times with same type, so set L_PTE_XN to 1 here.
679 * Otherwise, there may be unexpected result in video codec.
681 vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
682 pfn = phy_vpu_base_addr >> PAGE_SHIFT;
683 dev_dbg(vpu_dev, "size=0x%08lx, page no.=0x%08lx\n",
684 vm->vm_end - vm->vm_start, pfn);
685 return remap_pfn_range(vm, vm->vm_start, pfn,
686 vm->vm_end - vm->vm_start,
687 vm->vm_page_prot) ? -EAGAIN : 0;
691 * @brief memory map function of memory for vpu file operation
692 * @return 0 on success or negative error code on error
/*
 * Maps a DMA buffer into userspace as write-combined memory; the pfn
 * comes straight from vm_pgoff (userspace passes the physical page).
 */
694 static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
696 size_t request_size = vm->vm_end - vm->vm_start;
698 dev_dbg(vpu_dev, "start=0x%08lx, pgoff=0x%08lx, size=%zx\n",
699 vm->vm_start, vm->vm_pgoff, request_size);
701 vm->vm_flags |= VM_IO;
702 vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
704 return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
705 request_size, vm->vm_page_prot) ? -EAGAIN : 0;
709 * @brief memory map function of vmalloced share memory
710 * @return 0 on success or negative error code on error
/* Maps the vmalloc'ed share buffer; vm_pgoff encodes its kernel address. */
712 static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
716 ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
717 vm->vm_flags |= VM_IO;
721 * @brief memory map interface for vpu file operation
722 * @return 0 on success or negative error code on error
/*
 * Dispatch on vm_pgoff: the vshare buffer's page offset selects the
 * vmalloc mapping, any other non-zero offset is a DMA buffer, and
 * offset zero maps the hardware registers.
 */
724 static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
726 unsigned long offset;
728 offset = (unsigned long)vshare_mem.cpu_addr >> PAGE_SHIFT;
730 if (vm->vm_pgoff && (vm->vm_pgoff == offset))
731 return vpu_map_vshare_mem(fp, vm);
732 else if (vm->vm_pgoff)
733 return vpu_map_dma_mem(fp, vm);
735 return vpu_map_hwregs(fp, vm);
/* Character-device entry points (.open/.mmap lines not visible here). */
738 static const struct file_operations vpu_fops = {
739 .owner = THIS_MODULE,
741 .unlocked_ioctl = vpu_ioctl,
742 .release = vpu_release,
743 .fasync = vpu_fasync,
/* Per-SoC capability tables matched against the device-tree compatible. */
747 static const struct mxc_vpu_soc_data imx6dl_vpu_data = {
748 .regulator_required = 1,
753 static const struct mxc_vpu_soc_data imx6q_vpu_data = {
754 .quirk_subblk_en = 1,
755 .regulator_required = 1,
760 static const struct mxc_vpu_soc_data imx53_vpu_data = {
763 static const struct mxc_vpu_soc_data imx51_vpu_data = {
767 static const struct of_device_id vpu_of_match[] = {
768 { .compatible = "fsl,imx6dl-vpu", .data = &imx6dl_vpu_data, },
769 { .compatible = "fsl,imx6q-vpu", .data = &imx6q_vpu_data, },
770 { .compatible = "fsl,imx53-vpu", .data = &imx53_vpu_data, },
771 { .compatible = "fsl,imx51-vpu", .data = &imx51_vpu_data, },
774 MODULE_DEVICE_TABLE(of, vpu_of_match);
777 * This function is called by the driver framework to initialize the vpu device.
778 * @param dev The device structure for the vpu passed in by the framework.
779 * @return 0 on success or negative error code on error
781 static int vpu_dev_probe(struct platform_device *pdev)
784 struct device *temp_class;
785 struct resource *res;
786 unsigned long addr = 0;
787 struct device_node *np = pdev->dev.of_node;
789 struct vpu_priv *drv_data;
790 const struct of_device_id *of_id = of_match_device(vpu_of_match,
792 const struct mxc_vpu_soc_data *soc_data = of_id->data;
794 drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
795 if (drv_data == NULL)
798 drv_data->soc_data = soc_data;
799 mutex_init(&drv_data->lock);
801 init_waitqueue_head(&vpu_queue);
802 drv_data->workqueue = create_workqueue("vpu_wq");
803 INIT_WORK(&drv_data->work, vpu_worker_callback);
/* Optional on-chip IRAM carve-out described by DT ("iramsize"/"iram"). */
805 err = of_property_read_u32(np, "iramsize", &iramsize);
806 if (!err && iramsize) {
807 iram_pool = of_get_named_gen_pool(np, "iram", 0);
809 dev_err(&pdev->dev, "iram pool not available\n");
813 iram_base = gen_pool_alloc(iram_pool, iramsize);
815 dev_err(&pdev->dev, "unable to alloc iram\n");
819 addr = gen_pool_virt_to_phys(iram_pool, iram_base);
823 iram.start = iram.end = 0;
826 iram.end = addr + iramsize - 1;
829 vpu_dev = &pdev->dev;
/* Map the register block and record its physical base for vpu_mmap(). */
831 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
832 vpu_base = devm_ioremap_resource(&pdev->dev, res);
833 if (IS_ERR(vpu_base))
834 return PTR_ERR(vpu_base);
835 phy_vpu_base_addr = res->start;
/* Register the character device and its sysfs class/device node. */
837 vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
839 dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
843 vpu_class = class_create(THIS_MODULE, "mxc_vpu");
844 if (IS_ERR(vpu_class)) {
845 err = PTR_ERR(vpu_class);
849 temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
851 if (IS_ERR(temp_class)) {
852 err = PTR_ERR(temp_class);
856 vpu_clk = clk_get(&pdev->dev, "vpu_clk");
857 if (IS_ERR(vpu_clk)) {
858 err = PTR_ERR(vpu_clk);
862 vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
863 if (vpu_ipi_irq < 0) {
864 dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
868 err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
/* The "pu" regulator is mandatory only on SoCs that flag it. */
873 vpu_regulator = devm_regulator_get(vpu_dev, "pu");
874 if (IS_ERR(vpu_regulator)) {
875 if (drv_data->soc_data->regulator_required) {
876 dev_err(vpu_dev, "failed to get vpu power\n");
879 /* regulator_get will return error on MX5x,
880 * just ignore it everywhere
882 dev_warn(vpu_dev, "failed to get vpu power\n");
886 platform_set_drvdata(pdev, drv_data);
/* Optional JPEG unit gets its own interrupt line. */
888 if (drv_data->soc_data->has_jpu) {
889 vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
890 if (vpu_jpu_irq < 0) {
891 dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
895 err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
896 "VPU_JPG_IRQ", drv_data);
901 pm_runtime_enable(&pdev->dev);
904 dev_info(vpu_dev, "VPU initialized\n");
/* Error unwind labels: undo device/class/chrdev registration. */
908 device_destroy(vpu_class, MKDEV(vpu_major, 0));
909 class_destroy(vpu_class);
911 unregister_chrdev(vpu_major, "mxc_vpu");
/*
 * Tear-down counterpart of vpu_dev_probe(): release IRQs, workqueue,
 * IRAM, device nodes and DMA buffers, then reset the VPU.
 * NOTE(review): free_irq() is passed &vpu_data (pointer to the local)
 * while request_irq() used drv_data — dev_id mismatch; the lines needed
 * to confirm/fix this safely are not all visible in this view.
 */
915 static int vpu_dev_remove(struct platform_device *pdev)
917 struct vpu_priv *vpu_data = platform_get_drvdata(pdev);
919 pm_runtime_disable(&pdev->dev);
921 free_irq(vpu_ipi_irq, &vpu_data);
922 #ifdef MXC_VPU_HAS_JPU
923 free_irq(vpu_jpu_irq, &vpu_data);
925 cancel_work_sync(&vpu_data->work);
926 flush_workqueue(vpu_data->workqueue);
927 destroy_workqueue(vpu_data->workqueue);
930 gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);
933 device_destroy(vpu_class, MKDEV(vpu_major, 0));
934 class_destroy(vpu_class);
935 unregister_chrdev(vpu_major, "mxc_vpu");
939 vpu_free_dma_buffer(&bitwork_mem);
940 vpu_free_dma_buffer(&pic_para_mem);
941 vpu_free_dma_buffer(&user_data_mem);
943 /* reset VPU state */
945 vpu_clk_enable(vpu_data);
947 vpu_clk_disable(vpu_data);
954 #ifdef CONFIG_PM_SLEEP
/*
 * System-suspend hook: waits (up to ~1s) for the VPU to go idle, then
 * saves 64 BIT_CODE_BUF_ADDR registers and the program counter so
 * vpu_resume() can restore them.  i.MX53 takes an early-out path.
 */
955 static int vpu_suspend(struct device *dev)
957 struct vpu_priv *vpu_data = dev_get_drvdata(dev);
958 unsigned long timeout;
960 mutex_lock(&vpu_data->lock);
963 /* Wait for vpu go to idle state, suspect vpu cannot be changed
964 * to idle state after about 1 sec
966 timeout = jiffies + HZ;
967 while (READ_REG(BIT_BUSY_FLAG)) {
969 if (time_after(jiffies, timeout)) {
970 mutex_unlock(&vpu_data->lock);
975 if (vpu_data->soc_data->is_mx53) {
976 mutex_unlock(&vpu_data->lock);
/* Only snapshot state if firmware was ever loaded (bitwork allocated). */
980 if (bitwork_mem.cpu_addr != 0) {
983 /* Save 64 registers from BIT_CODE_BUF_ADDR */
984 for (i = 0; i < 64; i++)
985 regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
986 pc_before_suspend = READ_REG(BIT_CUR_PC);
989 vpu_clk_disable(vpu_data);
990 /* If VPU is working before suspend, disable
991 * regulator to make usecount right.
996 mutex_unlock(&vpu_data->lock);
/*
 * System-resume hook: restores the registers saved in vpu_suspend(),
 * re-downloads the 4KB boot code from the bitwork buffer, and restarts
 * the firmware if it was running (pc_before_suspend != 0).
 */
1000 static int vpu_resume(struct device *dev)
1003 struct vpu_priv *vpu_data = dev_get_drvdata(dev);
1005 mutex_lock(&vpu_data->lock);
1008 if (vpu_data->soc_data->is_mx53) {
1009 vpu_clk_enable(vpu_data);
1013 /* If VPU is working before suspend, enable
1014 * regulator to make usecount right.
1018 if (bitwork_mem.cpu_addr != NULL) {
1019 u32 *p = bitwork_mem.cpu_addr;
1024 vpu_clk_enable(vpu_data);
/* If the VPU kept power across suspend there is nothing to restore. */
1026 pc = READ_REG(BIT_CUR_PC);
1028 dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);
1032 /* Restore registers */
1033 for (i = 0; i < 64; i++)
1034 WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));
1036 WRITE_REG(0x0, BIT_RESET_CTRL);
1037 WRITE_REG(0x0, BIT_CODE_RUN);
1039 /* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
1040 if (vpu_data->soc_data->quirk_subblk_en)
1041 WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
1044 * Re-load boot code, from the codebuffer in external RAM.
1045 * Thankfully, we only need 4096 bytes, same for all platforms.
/* Each 32-bit word is split into two 16-bit halves written to
 * BIT_CODE_DOWN as (index << 16) | half. */
1047 for (i = 0; i < 2048; i += 4) {
1048 data = p[(i / 2) + 1];
1049 data_hi = (data >> 16) & 0xFFFF;
1050 data_lo = data & 0xFFFF;
1051 WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
1052 WRITE_REG(((i + 1) << 16) | data_lo,
1056 data_hi = (data >> 16) & 0xFFFF;
1057 data_lo = data & 0xFFFF;
1058 WRITE_REG(((i + 2) << 16) | data_hi,
1060 WRITE_REG(((i + 3) << 16) | data_lo,
/* Restart the firmware and spin until it reports ready. */
1064 if (pc_before_suspend) {
1065 WRITE_REG(0x1, BIT_BUSY_FLAG);
1066 WRITE_REG(0x1, BIT_CODE_RUN);
1067 while (READ_REG(BIT_BUSY_FLAG))
1070 dev_warn(vpu_dev, "PC=0 before suspend\n");
1075 mutex_unlock(&vpu_data->lock);
/* PM ops are only wired up when CONFIG_PM_SLEEP is enabled. */
1079 static SIMPLE_DEV_PM_OPS(vpu_pm_ops, vpu_suspend, vpu_resume);
1080 #define VPU_PM_OPS &vpu_pm_ops
1082 #define VPU_PM_OPS NULL
1083 #endif /* !CONFIG_PM_SLEEP */
1085 /*! Driver definition
1088 static struct platform_driver mxcvpu_driver = {
1091 .of_match_table = vpu_of_match,
1094 .probe = vpu_dev_probe,
1095 .remove = vpu_dev_remove,
1098 module_platform_driver(mxcvpu_driver);
1100 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1101 MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
1102 MODULE_LICENSE("GPL");