 * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 * @brief VPU system initialization and file operation implementation
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/stat.h>
#include <linux/platform_device.h>
#include <linux/kdev_t.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fsl_devices.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>
#include <linux/page-flags.h>
#include <linux/mm_types.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/mxc_vpu.h>
/* Define a new pgprot combining uncached and XN (never executable) */
#define pgprot_noncachedxn(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
	struct fasync_struct *async_queue;
	struct work_struct work;
	struct workqueue_struct *workqueue;
	const struct mxc_vpu_soc_data *soc_data;

struct vpu_user_data {
	struct vpu_priv *vpu_data;

/* To track the allocated memory buffer */
struct memalloc_record {
	struct list_head list;
	struct vpu_mem_desc mem;

struct mxc_vpu_soc_data {
	unsigned vpu_pwr_mgmnt:1,
static struct gen_pool *iram_pool;

static LIST_HEAD(mem_list);

static int vpu_major;
static struct class *vpu_class;
static struct vpu_priv vpu_data;
static u8 open_count;
static struct clk *vpu_clk;
static struct vpu_mem_desc bitwork_mem;
static struct vpu_mem_desc pic_para_mem;
static struct vpu_mem_desc user_data_mem;
static struct vpu_mem_desc share_mem;
static struct vpu_mem_desc vshare_mem;

static void __iomem *vpu_base;
static int vpu_ipi_irq;
static u32 phy_vpu_base_addr;

static struct device *vpu_dev;

static struct iram_setting iram;

/* implement the blocking ioctl */
static int irq_status;
static int codec_done;
static wait_queue_head_t vpu_queue;

static int vpu_jpu_irq;

static unsigned int regBk[64];
static unsigned int pc_before_suspend;

static struct regulator *vpu_regulator;

#define READ_REG(x)		readl_relaxed(vpu_base + (x))
#define WRITE_REG(val, x)	writel_relaxed(val, vpu_base + (x))
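
/*
 * The VPU clock is reference counted in struct vpu_priv (clk_enabled):
 * clk_prepare_enable() is only called on the 0 -> 1 transition and
 * clk_disable_unprepare() on the 1 -> 0 transition, so nested gate/ungate
 * requests from the ioctl path and from vpu_release() stay balanced.
 */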
static int vpu_clk_enable(struct vpu_priv *vpu_data)
	if (vpu_data->clk_enabled++ == 0)
		ret = clk_prepare_enable(vpu_clk);

	if (WARN_ON(vpu_data->clk_enabled <= 0))

static int vpu_clk_disable(struct vpu_priv *vpu_data)
	if (WARN_ON(vpu_data->clk_enabled == 0))

	if (--vpu_data->clk_enabled == 0)
		clk_disable_unprepare(vpu_clk);

static inline int vpu_reset(void)
	return device_reset(vpu_dev);

static void vpu_power_up(void)
	if (IS_ERR(vpu_regulator))

	ret = regulator_enable(vpu_regulator);
		dev_err(vpu_dev, "failed to power up vpu: %d\n", ret);

static void vpu_power_down(void)
	if (IS_ERR(vpu_regulator))

	ret = regulator_disable(vpu_regulator);
		dev_err(vpu_dev, "failed to power down vpu: %d\n", ret);
 * Private function to allocate a DMA buffer.
 * @return 0 on success.
static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
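	/*
	 * dma_alloc_coherent() returns the kernel virtual address of the
	 * buffer and fills in the DMA address the VPU uses (kept in
	 * mem->phy_addr); both are handed back to user space in the
	 * vpu_mem_desc.
	 */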
	mem->cpu_addr = dma_alloc_coherent(vpu_dev, PAGE_ALIGN(mem->size),
					   GFP_DMA | GFP_KERNEL);
	dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = %p\n", mem->cpu_addr);
	if (mem->cpu_addr == NULL) {
		dev_err(vpu_dev, "Physical memory allocation error!\n");

 * Private function to free a DMA buffer.
static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
	if (mem->cpu_addr != NULL)
		dma_free_coherent(vpu_dev, PAGE_ALIGN(mem->size),
				  mem->cpu_addr, mem->phy_addr);
 * Private function to free all allocated buffers.
 * @return 0 on success.
static int vpu_free_buffers(void)
	struct memalloc_record *rec, *n;
	struct vpu_mem_desc mem;

	list_for_each_entry_safe(rec, n, &mem_list, list) {
		if (mem.cpu_addr != 0) {
			vpu_free_dma_buffer(&mem);
			dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
			/* delete from list */
			list_del(&rec->list);
static inline void vpu_worker_callback(struct work_struct *w)
	struct vpu_priv *dev = container_of(w, struct vpu_priv, work);

	if (dev->async_queue)
		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);

	 * The clock is gated on when decoding/encoding starts and gated off
	 * when the codec is done.
	wake_up_interruptible(&vpu_queue);
 * @brief vpu interrupt handler
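 *
 * Reads the interrupt reason from BIT_INT_REASON, acknowledges the
 * interrupt through BIT_INT_CLEAR and defers the user-space notification
 * (fasync/wake-up) to the driver workqueue, so nothing heavy runs in
 * hard-IRQ context.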
static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
	struct vpu_priv *dev = dev_id;

	reg = READ_REG(BIT_INT_REASON);
	WRITE_REG(0x1, BIT_INT_CLEAR);

	queue_work(dev->workqueue, &dev->work);

 * @brief vpu jpu interrupt handler
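 *
 * The JPEG unit signals picture completion here; the status is read from
 * MJPEG_PIC_STATUS_REG and completion is reported through the same
 * workqueue as the codec interrupt.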
static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
	struct vpu_priv *dev = dev_id;

	reg = READ_REG(MJPEG_PIC_STATUS_REG);

	queue_work(dev->workqueue, &dev->work);
 * @brief open function for vpu file operation
 * @return 0 on success or negative error code on error
static int vpu_open(struct inode *inode, struct file *filp)
	struct vpu_user_data *user_data = devm_kzalloc(vpu_dev,

	if (user_data == NULL)

	user_data->vpu_data = &vpu_data;

	mutex_lock(&vpu_data.lock);

	if (open_count++ == 0) {
		pm_runtime_get_sync(vpu_dev);

	filp->private_data = user_data;
	mutex_unlock(&vpu_data.lock);
 * @brief ioctl function for vpu file operation
 * @param cmd ioctl command
 * @return 0 on success or negative error code on error
static long vpu_ioctl(struct file *filp, u_int cmd,
	struct vpu_user_data *user_data = filp->private_data;
	struct vpu_priv *vpu_data = user_data->vpu_data;

	case VPU_IOC_PHYMEM_ALLOC:
		struct memalloc_record *rec;

		rec = kzalloc(sizeof(*rec), GFP_KERNEL);

		if (copy_from_user(&rec->mem,
				   (struct vpu_mem_desc *)arg,
				   sizeof(struct vpu_mem_desc))) {

		dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",

		ret = vpu_alloc_dma_buffer(&rec->mem);

		if (copy_to_user((void __user *)arg, &rec->mem,
				 sizeof(struct vpu_mem_desc))) {

		mutex_lock(&vpu_data->lock);
		list_add(&rec->list, &mem_list);
		mutex_unlock(&vpu_data->lock);
	case VPU_IOC_PHYMEM_FREE:
		struct memalloc_record *rec, *n;
		struct vpu_mem_desc vpu_mem;

		if (copy_from_user(&vpu_mem,
				   (struct vpu_mem_desc *)arg,
				   sizeof(struct vpu_mem_desc)))

		dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = %p\n",

		if (vpu_mem.cpu_addr != NULL)
			vpu_free_dma_buffer(&vpu_mem);

		mutex_lock(&vpu_data->lock);
		list_for_each_entry_safe(rec, n, &mem_list, list) {
			if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
				list_del(&rec->list);

		mutex_unlock(&vpu_data->lock);
	case VPU_IOC_WAIT4INT:
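		/*
		 * Block until the interrupt worker signals completion on
		 * vpu_queue or until the timeout (passed in milliseconds via
		 * arg) expires; a pending signal also ends the wait.
		 */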
		u_long timeout = arg;

		ret = wait_event_interruptible_timeout(vpu_queue,
						       msecs_to_jiffies(timeout));
			dev_warn(vpu_dev, "VPU blocking: timeout.\n");
		} else if (signal_pending(current)) {
			dev_warn(vpu_dev, "VPU interrupt received.\n");

	case VPU_IOC_IRAM_SETTING:
		ret = copy_to_user((void __user *)arg, &iram,
				   sizeof(struct iram_setting));
	case VPU_IOC_CLKGATE_SETTING:
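		/*
		 * Clock gate/ungate request from user space.  Each ungate is
		 * counted per open file (user_data->clk_enable_cnt) so that
		 * vpu_release() can drop any clock references the process
		 * failed to release.
		 */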
		if (get_user(clkgate_en, (u32 __user *)arg))

		mutex_lock(&vpu_data->lock);
		ret = vpu_clk_enable(vpu_data);
		user_data->clk_enable_cnt++;
		if (user_data->clk_enable_cnt == 0) {
		if (--user_data->clk_enable_cnt == 0)
			vpu_clk_disable(vpu_data);
		mutex_unlock(&vpu_data->lock);
	case VPU_IOC_GET_SHARE_MEM:
		mutex_lock(&vpu_data->lock);
		if (share_mem.cpu_addr == NULL) {
			if (copy_from_user(&share_mem,
					   (struct vpu_mem_desc *)arg,
					   sizeof(struct vpu_mem_desc))) {
				mutex_unlock(&vpu_data->lock);

			ret = vpu_alloc_dma_buffer(&share_mem);
				mutex_unlock(&vpu_data->lock);

		if (copy_to_user((void __user *)arg,
				 sizeof(struct vpu_mem_desc)))

		mutex_unlock(&vpu_data->lock);
	case VPU_IOC_REQ_VSHARE_MEM:
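		/*
		 * The "virtual share" region is allocated lazily on first
		 * request with vmalloc_user(), so it can later be mapped into
		 * the caller through remap_vmalloc_range() in
		 * vpu_map_vshare_mem().
		 */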
		mutex_lock(&vpu_data->lock);
		if (vshare_mem.cpu_addr == NULL) {
			if (copy_from_user(&vshare_mem,
					   (struct vpu_mem_desc *)arg,
				mutex_unlock(&vpu_data->lock);

			vshare_mem.cpu_addr = vmalloc_user(vshare_mem.size);
			if (vshare_mem.cpu_addr == NULL) {
				mutex_unlock(&vpu_data->lock);

		if (copy_to_user((void __user *)arg, &vshare_mem,
				 sizeof(struct vpu_mem_desc)))

		mutex_unlock(&vpu_data->lock);
	case VPU_IOC_GET_WORK_ADDR:
		if (bitwork_mem.cpu_addr == 0) {
			if (copy_from_user(&bitwork_mem,
					   (struct vpu_mem_desc *)arg,
					   sizeof(struct vpu_mem_desc)))

			ret = vpu_alloc_dma_buffer(&bitwork_mem);

		if (copy_to_user((void __user *)arg,
	 * The following two ioctls are used when the user allocates the
	 * working buffer and registers it with the vpu driver.
	case VPU_IOC_QUERY_BITWORK_MEM:
		if (copy_to_user((void __user *)arg,
				 sizeof(struct vpu_mem_desc)))

	case VPU_IOC_SET_BITWORK_MEM:
		if (copy_from_user(&bitwork_mem,
				   (struct vpu_mem_desc *)arg,
				   sizeof(struct vpu_mem_desc)))

	case VPU_IOC_SYS_SW_RESET:
	case VPU_IOC_REG_DUMP:
	case VPU_IOC_PHYMEM_DUMP:

	case VPU_IOC_PHYMEM_CHECK:
		struct vpu_mem_desc check_memory;

		ret = copy_from_user(&check_memory,
				     sizeof(struct vpu_mem_desc));
			dev_err(vpu_dev, "copy from user failure:%d\n", ret);

		check_memory.size = 1;
		if (copy_to_user((void __user *)arg, &check_memory,
				 sizeof(struct vpu_mem_desc)))

	case VPU_IOC_LOCK_DEV:
		if (get_user(lock_en, (u32 __user *)arg))

			mutex_lock(&vpu_data->lock);
			mutex_unlock(&vpu_data->lock);

		dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
 * @brief Release function for vpu file operation
 * @return 0 on success or negative error code on error
static int vpu_release(struct inode *inode, struct file *filp)
	unsigned long timeout;
	struct vpu_user_data *user_data = filp->private_data;
	struct vpu_priv *vpu_data = user_data->vpu_data;

	mutex_lock(&vpu_data->lock);

	if (open_count > 0 && !--open_count) {
		/* Wait for the VPU to go idle */
		vpu_clk_enable(vpu_data);
		if (READ_REG(BIT_CUR_PC)) {
			timeout = jiffies + HZ;
			while (READ_REG(BIT_BUSY_FLAG)) {
				if (time_after(jiffies, timeout)) {
					dev_warn(vpu_dev, "VPU timeout during release\n");

			/* Clean up interrupt */
			cancel_work_sync(&vpu_data->work);
			flush_workqueue(vpu_data->workqueue);

			if (READ_REG(BIT_BUSY_FLAG)) {
				if (vpu_data->soc_data->is_mx51 ||
				    vpu_data->soc_data->is_mx53) {
					"fatal error: can't gate/power off when VPU is busy\n");
					vpu_clk_disable(vpu_data);
					mutex_unlock(&vpu_data->lock);

				if (vpu_data->soc_data->is_mx6dl ||
				    vpu_data->soc_data->is_mx6q) {
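					/*
					 * On i.MX6, ask the BIT processor to idle through the
					 * 0x10F0 command register and wait for the 0x77
					 * acknowledge in 0x10F4 (magic values taken from the
					 * vendor BSP) before the clock is gated.
					 */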
					WRITE_REG(0x11, 0x10F0);
					timeout = jiffies + HZ;
					while (READ_REG(0x10F4) != 0x77) {
						if (time_after(jiffies, timeout))

					if (READ_REG(0x10F4) != 0x77) {
							"fatal error: can't gate/power off when VPU is busy\n");
						WRITE_REG(0x0, 0x10F0);
						vpu_clk_disable(vpu_data);
						mutex_unlock(&vpu_data->lock);

		/* Free shared memory when vpu device is idle */
		vpu_free_dma_buffer(&share_mem);
		share_mem.cpu_addr = 0;
		vfree(vshare_mem.cpu_addr);
		vshare_mem.cpu_addr = 0;

		if (user_data->clk_enable_cnt)
			vpu_clk_disable(vpu_data);

		vpu_clk_disable(vpu_data);
		pm_runtime_put_sync_suspend(vpu_dev);

	devm_kfree(vpu_dev, user_data);

	mutex_unlock(&vpu_data->lock);
 * @brief fasync function for vpu file operation
 * @return 0 on success or negative error code on error
static int vpu_fasync(int fd, struct file *filp, int mode)
	struct vpu_user_data *user_data = filp->private_data;
	struct vpu_priv *vpu_data = user_data->vpu_data;

	return fasync_helper(fd, filp, mode, &vpu_data->async_queue);
 * @brief memory map function of hardware registers for vpu file operation
 * @return 0 on success or negative error code on error
static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
	vm->vm_flags |= VM_IO;
	 * The vpu registers were mapped with ioremap() at probe time, which
	 * sets L_PTE_XN, and the same physical address must always be mapped
	 * with the same memory type, so set L_PTE_XN here as well.
	 * Otherwise the video codec may show unexpected results.
	vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
	pfn = phy_vpu_base_addr >> PAGE_SHIFT;
	dev_dbg(vpu_dev, "size=0x%lx, page no.=0x%lx\n",
		vm->vm_end - vm->vm_start, pfn);
	return remap_pfn_range(vm, vm->vm_start, pfn,
			       vm->vm_end - vm->vm_start,
			       vm->vm_page_prot) ? -EAGAIN : 0;
 * @brief memory map function of DMA memory for vpu file operation
 * @return 0 on success or negative error code on error
static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
	size_t request_size = vm->vm_end - vm->vm_start;

	dev_dbg(vpu_dev, "start=0x%08lx, pgoff=0x%08lx, size=%zx\n",
		vm->vm_start, vm->vm_pgoff, request_size);

	vm->vm_flags |= VM_IO;
	vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);

	return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
			       request_size, vm->vm_page_prot) ? -EAGAIN : 0;
 * @brief memory map function of the vmalloc'ed share memory
 * @return 0 on success or negative error code on error
static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
	ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
	vm->vm_flags |= VM_IO;

 * @brief memory map interface for vpu file operation
 * @return 0 on success or negative error code on error
static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
	unsigned long offset;

	offset = (unsigned long)vshare_mem.cpu_addr >> PAGE_SHIFT;
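	/*
	 * The mmap offset selects what gets mapped: an offset equal to the
	 * vshare kernel address (>> PAGE_SHIFT) maps the vmalloc'ed share
	 * memory, any other non-zero offset is treated as a page frame
	 * number for DMA memory, and offset 0 maps the VPU register window.
	 */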
	if (vm->vm_pgoff && (vm->vm_pgoff == offset))
		return vpu_map_vshare_mem(fp, vm);
	else if (vm->vm_pgoff)
		return vpu_map_dma_mem(fp, vm);

	return vpu_map_hwregs(fp, vm);
static const struct file_operations vpu_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vpu_ioctl,
	.release = vpu_release,
	.fasync = vpu_fasync,

static const struct mxc_vpu_soc_data imx6dl_vpu_data = {
	.regulator_required = 1,

static const struct mxc_vpu_soc_data imx6q_vpu_data = {
	.quirk_subblk_en = 1,
	.regulator_required = 1,

static const struct mxc_vpu_soc_data imx53_vpu_data = {

static const struct mxc_vpu_soc_data imx51_vpu_data = {

static const struct of_device_id vpu_of_match[] = {
	{ .compatible = "fsl,imx6dl-vpu", .data = &imx6dl_vpu_data, },
	{ .compatible = "fsl,imx6q-vpu", .data = &imx6q_vpu_data, },
	{ .compatible = "fsl,imx53-vpu", .data = &imx53_vpu_data, },
	{ .compatible = "fsl,imx51-vpu", .data = &imx51_vpu_data, },

MODULE_DEVICE_TABLE(of, vpu_of_match);
 * This function is called by the driver framework to initialize the vpu device.
 * @param dev The device structure for the vpu passed in by the framework.
 * @return 0 on success or negative error code on error
static int vpu_dev_probe(struct platform_device *pdev)
	struct device *temp_class;
	struct resource *res;
	unsigned long addr = 0;
	struct device_node *np = pdev->dev.of_node;
	struct vpu_priv *drv_data;
	const struct of_device_id *of_id = of_match_device(vpu_of_match,
	const struct mxc_vpu_soc_data *soc_data = of_id->data;

	drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
	if (drv_data == NULL)

	drv_data->soc_data = soc_data;
	mutex_init(&drv_data->lock);

	init_waitqueue_head(&vpu_queue);
	drv_data->workqueue = create_workqueue("vpu_wq");
	INIT_WORK(&drv_data->work, vpu_worker_callback);
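
	/*
	 * Optionally carve a chunk of on-chip SRAM (OCRAM) out of the "iram"
	 * gen_pool; the physical window is recorded in 'iram' and handed to
	 * user space through VPU_IOC_IRAM_SETTING.
	 */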
	err = of_property_read_u32(np, "iramsize", &iramsize);
	if (!err && iramsize) {
		iram_pool = of_get_named_gen_pool(np, "iram", 0);
			dev_err(&pdev->dev, "iram pool not available\n");

		iram_base = gen_pool_alloc(iram_pool, iramsize);
			dev_err(&pdev->dev, "unable to alloc iram\n");

		addr = gen_pool_virt_to_phys(iram_pool, iram_base);

		iram.start = iram.end = 0;
		iram.end = addr + iramsize - 1;

	vpu_dev = &pdev->dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
		dev_err(vpu_dev, "vpu: unable to get vpu base addr\n");

	phy_vpu_base_addr = res->start;
	vpu_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(vpu_base))
		return PTR_ERR(vpu_base);
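
	/* vpu_major is 0 at this point, so register_chrdev() allocates a dynamic major number */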
	vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
		dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");

	vpu_class = class_create(THIS_MODULE, "mxc_vpu");
	if (IS_ERR(vpu_class)) {
		err = PTR_ERR(vpu_class);

	temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
	if (IS_ERR(temp_class)) {
		err = PTR_ERR(temp_class);

	vpu_clk = clk_get(&pdev->dev, "vpu_clk");
	if (IS_ERR(vpu_clk)) {
		err = PTR_ERR(vpu_clk);

	vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
	if (vpu_ipi_irq < 0) {
		dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");

	err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",

	vpu_regulator = devm_regulator_get(vpu_dev, "pu");
	if (IS_ERR(vpu_regulator)) {
		if (drv_data->soc_data->regulator_required) {
			dev_err(vpu_dev, "failed to get vpu power\n");
		/* regulator_get() will return an error on MX5x,
		 * so just ignore it.
		 */
		dev_warn(vpu_dev, "failed to get vpu power\n");

	platform_set_drvdata(pdev, drv_data);
	if (drv_data->soc_data->has_jpu) {
		vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
		if (vpu_jpu_irq < 0) {
			dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");

		err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
				  "VPU_JPG_IRQ", &vpu_data);

	pm_runtime_enable(&pdev->dev);

	dev_info(vpu_dev, "VPU initialized\n");

	device_destroy(vpu_class, MKDEV(vpu_major, 0));
	class_destroy(vpu_class);
	unregister_chrdev(vpu_major, "mxc_vpu");
static int vpu_dev_remove(struct platform_device *pdev)
	struct vpu_priv *vpu_data = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);

	free_irq(vpu_ipi_irq, &vpu_data);
#ifdef MXC_VPU_HAS_JPU
	free_irq(vpu_jpu_irq, &vpu_data);

	cancel_work_sync(&vpu_data->work);
	flush_workqueue(vpu_data->workqueue);
	destroy_workqueue(vpu_data->workqueue);

	gen_pool_free(iram_pool, iram_base, iram.end - iram.start + 1);

	device_destroy(vpu_class, MKDEV(vpu_major, 0));
	class_destroy(vpu_class);
	unregister_chrdev(vpu_major, "mxc_vpu");

	vpu_free_dma_buffer(&bitwork_mem);
	vpu_free_dma_buffer(&pic_para_mem);
	vpu_free_dma_buffer(&user_data_mem);

	/* reset VPU state */
	vpu_clk_enable(vpu_data);
	vpu_clk_disable(vpu_data);
static int vpu_suspend(struct device *dev)
	struct vpu_priv *vpu_data = dev_get_drvdata(dev);
	unsigned long timeout;

	mutex_lock(&vpu_data->lock);

	/* Wait for the VPU to go idle; assume it cannot reach the idle
	 * state if it is still busy after about 1 second.
	 */
	timeout = jiffies + HZ;
	while (READ_REG(BIT_BUSY_FLAG)) {
		if (time_after(jiffies, timeout)) {
			mutex_unlock(&vpu_data->lock);

	if (vpu_data->soc_data->is_mx53) {
		mutex_unlock(&vpu_data->lock);

	if (bitwork_mem.cpu_addr != 0) {
		/* Save 64 registers from BIT_CODE_BUF_ADDR */
		for (i = 0; i < 64; i++)
			regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
		pc_before_suspend = READ_REG(BIT_CUR_PC);

	vpu_clk_disable(vpu_data);
	/* If the VPU was running before suspend, disable the
	 * regulator to keep the use count right.
	 */

	mutex_unlock(&vpu_data->lock);
static int vpu_resume(struct device *dev)
	struct vpu_priv *vpu_data = dev_get_drvdata(dev);

	mutex_lock(&vpu_data->lock);

	if (vpu_data->soc_data->is_mx53) {
		vpu_clk_enable(vpu_data);

	/* If the VPU was running before suspend, enable the
	 * regulator to keep the use count right.
	 */

	if (bitwork_mem.cpu_addr != NULL) {
		u32 *p = bitwork_mem.cpu_addr;

		vpu_clk_enable(vpu_data);

		pc = READ_REG(BIT_CUR_PC);
			dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);

		/* Restore registers */
		for (i = 0; i < 64; i++)
			WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));

		WRITE_REG(0x0, BIT_RESET_CTRL);
		WRITE_REG(0x0, BIT_CODE_RUN);
		/* The MX6 RTL has a bug: MBC_SET_SUBBLK_EN is not initialized on reset */
		if (vpu_data->soc_data->quirk_subblk_en)
			WRITE_REG(0x0, MBC_SET_SUBBLK_EN);

		 * Re-load the boot code from the code buffer in external RAM.
		 * Thankfully we only need 4096 bytes, the same for all platforms.
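		 *
		 * Each 32-bit word of the code buffer is split into two
		 * 16-bit halves which are written to BIT_CODE_DOWN together
		 * with the target code-RAM word address in the upper 16 bits.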
		for (i = 0; i < 2048; i += 4) {
			data = p[(i / 2) + 1];
			data_hi = (data >> 16) & 0xFFFF;
			data_lo = data & 0xFFFF;
			WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
			WRITE_REG(((i + 1) << 16) | data_lo,

			data_hi = (data >> 16) & 0xFFFF;
			data_lo = data & 0xFFFF;
			WRITE_REG(((i + 2) << 16) | data_hi,
			WRITE_REG(((i + 3) << 16) | data_lo,
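
		/*
		 * If the VPU was running before suspend, restart the BIT
		 * processor: set BIT_BUSY_FLAG, kick BIT_CODE_RUN and wait
		 * for the busy flag to clear.
		 */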
		if (pc_before_suspend) {
			WRITE_REG(0x1, BIT_BUSY_FLAG);
			WRITE_REG(0x1, BIT_CODE_RUN);
			while (READ_REG(BIT_BUSY_FLAG))
			dev_warn(vpu_dev, "PC=0 before suspend\n");

	mutex_unlock(&vpu_data->lock);

static SIMPLE_DEV_PM_OPS(vpu_pm_ops, vpu_suspend, vpu_resume);
#define VPU_PM_OPS	&vpu_pm_ops
#define VPU_PM_OPS	NULL
#endif /* !CONFIG_PM */
/*! Driver definition
 */
static struct platform_driver mxcvpu_driver = {
		.of_match_table = vpu_of_match,
	.probe = vpu_dev_probe,
	.remove = vpu_dev_remove,

module_platform_driver(mxcvpu_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
MODULE_LICENSE("GPL");