2 * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
17 * @brief VPU system initialization and file operation implementation
22 #include <linux/kernel.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/stat.h>
27 #include <linux/platform_device.h>
28 #include <linux/kdev_t.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/iram_alloc.h>
31 #include <linux/wait.h>
32 #include <linux/list.h>
33 #include <linux/clk.h>
34 #include <linux/delay.h>
35 #include <linux/fsl_devices.h>
36 #include <linux/uaccess.h>
38 #include <linux/slab.h>
39 #include <linux/workqueue.h>
40 #include <linux/sched.h>
41 #include <linux/vmalloc.h>
42 #include <linux/regulator/consumer.h>
43 #include <linux/page-flags.h>
44 #include <linux/mm_types.h>
45 #include <linux/types.h>
46 #include <linux/memblock.h>
47 #include <linux/memory.h>
48 #include <linux/version.h>
49 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
50 #include <linux/module.h>
51 #include <linux/pm_runtime.h>
52 #include <mach/busfreq.h>
53 #include <mach/hardware.h>
54 #include <mach/common.h>
57 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
58 #include <linux/sizes.h>
60 #include <asm/sizes.h>
62 #include <mach/clock.h>
63 #include <mach/hardware.h>
65 #include <mach/mxc_vpu.h>
67 /* Define one new pgprot which combined uncached and XN(never executable) */
68 #define pgprot_noncachedxn(prot) \
69 __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
72 struct fasync_struct *async_queue;
73 struct work_struct work;
74 struct workqueue_struct *workqueue;
78 /* To track the allocated memory buffer */
79 struct memalloc_record {
80 struct list_head list;
81 struct vpu_mem_desc mem;
89 static LIST_HEAD(head);
92 static int vpu_clk_usercount;
93 static struct class *vpu_class;
94 static struct vpu_priv vpu_data;
96 static struct clk *vpu_clk;
97 static struct vpu_mem_desc bitwork_mem = { 0 };
98 static struct vpu_mem_desc pic_para_mem = { 0 };
99 static struct vpu_mem_desc user_data_mem = { 0 };
100 static struct vpu_mem_desc share_mem = { 0 };
101 static struct vpu_mem_desc vshare_mem = { 0 };
103 static void __iomem *vpu_base;
104 static int vpu_ipi_irq;
105 static u32 phy_vpu_base_addr;
106 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
107 static phys_addr_t top_address_DRAM;
108 static struct mxc_vpu_platform_data *vpu_plat;
111 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
112 static struct platform_device *vpu_pdev;
116 static struct iram_setting iram;
118 /* implement the blocking ioctl */
119 static int irq_status;
120 static int codec_done;
121 static wait_queue_head_t vpu_queue;
123 #ifdef CONFIG_SOC_IMX6Q
124 #define MXC_VPU_HAS_JPU
127 #ifdef MXC_VPU_HAS_JPU
128 static int vpu_jpu_irq;
131 static unsigned int regBk[64];
132 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
133 static struct regulator *vpu_regulator;
135 static unsigned int pc_before_suspend;
136 static atomic_t clk_cnt_from_ioc = ATOMIC_INIT(0);
138 #define READ_REG(x) readl_relaxed(vpu_base + x)
139 #define WRITE_REG(val, x) writel_relaxed(val, vpu_base + x)
141 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
142 /* redirect to static functions */
/*
 * Compatibility shims for 3.5+ kernels: SoC identification is redirected
 * to devicetree compatible-string matching instead of the old mach-level
 * cpu_is_*() helpers.
 */
143 static int cpu_is_mx6dl(void)
146 ret = of_machine_is_compatible("fsl,imx6dl");
150 static int cpu_is_mx6q(void)
153 ret = of_machine_is_compatible("fsl,imx6q");
159 * Private function to alloc dma buffer
160 * @return status 0 success.
162 static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
/* Allocate a coherent DMA buffer of mem->size bytes (page-aligned).
 * Both the kernel virtual address (cpu_addr) and the bus/physical
 * address (phy_addr) are written back into @mem for later mmap/free. */
164 mem->cpu_addr = (unsigned long)
165 dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
166 (dma_addr_t *) (&mem->phy_addr),
167 GFP_DMA | GFP_KERNEL);
168 pr_debug("[ALLOC] mem alloc cpu_addr = 0x%x\n", mem->cpu_addr);
/* dma_alloc_coherent() returns NULL on failure. */
169 if ((void *)(mem->cpu_addr) == NULL) {
170 printk(KERN_ERR "Physical memory allocation error!\n");
177 * Private function to free dma buffer
/* Release a coherent DMA buffer previously obtained from
 * vpu_alloc_dma_buffer(); a zero cpu_addr means "not allocated". */
179 static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
181 if (mem->cpu_addr != 0) {
182 dma_free_coherent(0, PAGE_ALIGN(mem->size),
183 (void *)mem->cpu_addr, mem->phy_addr);
188 * Private function to free buffers
189 * @return status 0 success.
191 static int vpu_free_buffers(void)
193 struct memalloc_record *rec, *n;
194 struct vpu_mem_desc mem;
/* Walk the global allocation list and release every outstanding DMA
 * buffer; the _safe iterator is required because records are unlinked
 * inside the loop. */
196 list_for_each_entry_safe(rec, n, &head, list) {
198 if (mem.cpu_addr != 0) {
199 vpu_free_dma_buffer(&mem);
200 pr_debug("[FREE] freed paddr=0x%08X\n", mem.phy_addr);
201 /* delete from list */
202 list_del(&rec->list);
/* Bottom-half worker: runs in process context after a VPU/JPU interrupt,
 * notifies user space via SIGIO (fasync) and wakes WAIT4INT sleepers. */
210 static inline void vpu_worker_callback(struct work_struct *w)
212 struct vpu_priv *dev = container_of(w, struct vpu_priv,
215 if (dev->async_queue)
216 kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
220 * Clock is gated on when dec/enc started, gate it off when
226 wake_up_interruptible(&vpu_queue);
230 * @brief vpu interrupt handler
232 static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
234 struct vpu_priv *dev = dev_id;
/* Read the interrupt reason, acknowledge the interrupt in hardware,
 * then defer the user-space notification to the workqueue. */
237 reg = READ_REG(BIT_INT_REASON);
240 WRITE_REG(0x1, BIT_INT_CLEAR);
242 queue_work(dev->workqueue, &dev->work);
248 * @brief vpu jpu interrupt handler
250 #ifdef MXC_VPU_HAS_JPU
251 static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
253 struct vpu_priv *dev = dev_id;
/* JPU (JPEG unit) interrupt: sample the picture status register and
 * defer user-space notification to the shared workqueue. */
256 reg = READ_REG(MJPEG_PIC_STATUS_REG);
260 queue_work(dev->workqueue, &dev->work);
267 * @brief Check whether a physical memory address about to be passed to
268 * the VPU is valid. Passing a wrong address (e.g. a virtual address)
269 * to the VPU has been observed to hang the system.
271 * @return true if the address is a valid physical memory address, false otherwise.
273 bool vpu_is_valid_phy_memory(u32 paddr)
275 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
/* On pre-3.5 kernels, reject any address above the end of (reserved)
 * DRAM cached in top_address_DRAM at init time. */
276 if (paddr > top_address_DRAM)
284 * @brief open function for vpu file operation
286 * @return 0 on success or negative error code on error
288 static int vpu_open(struct inode *inode, struct file *filp)
/* Serialize open bookkeeping against the ioctl/release paths. */
291 mutex_lock(&vpu_data.lock);
/* First opener powers up the VPU: regulator on pre-3.5 kernels,
 * runtime PM + GPC power domain on 3.5+. */
293 if (open_count++ == 0) {
294 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
295 if (!IS_ERR(vpu_regulator))
296 regulator_enable(vpu_regulator);
298 pm_runtime_get_sync(&vpu_pdev->dev);
299 imx_gpc_power_up_pu(true);
302 #ifdef CONFIG_SOC_IMX6Q
303 clk_prepare(vpu_clk);
/* A non-zero program counter here means the VPU was not powered off
 * before this open — log it for debugging. */
305 if (READ_REG(BIT_CUR_PC))
306 pr_debug("Not power off before vpu open!\n");
307 clk_disable(vpu_clk);
308 clk_unprepare(vpu_clk);
312 filp->private_data = (void *)(&vpu_data);
313 mutex_unlock(&vpu_data.lock);
318 * @brief IO ctrl function for vpu file operation
319 * @param cmd IO ctrl command
320 * @return 0 on success or negative error code on error
322 static long vpu_ioctl(struct file *filp, u_int cmd,
/* Allocate a DMA buffer on behalf of user space and track it in the
 * global allocation list so it can be reclaimed at release time. */
328 case VPU_IOC_PHYMEM_ALLOC:
330 struct memalloc_record *rec;
332 rec = kzalloc(sizeof(*rec), GFP_KERNEL);
336 ret = copy_from_user(&(rec->mem),
337 (struct vpu_mem_desc *)arg,
338 sizeof(struct vpu_mem_desc));
344 pr_debug("[ALLOC] mem alloc size = 0x%x\n",
347 ret = vpu_alloc_dma_buffer(&(rec->mem));
351 "Physical memory allocation error!\n");
/* Hand the filled-in descriptor (addresses) back to user space. */
354 ret = copy_to_user((void __user *)arg, &(rec->mem),
355 sizeof(struct vpu_mem_desc));
362 mutex_lock(&vpu_data.lock);
363 list_add(&rec->list, &head);
364 mutex_unlock(&vpu_data.lock);
/* Free a user-described DMA buffer and drop its tracking record. */
368 case VPU_IOC_PHYMEM_FREE:
370 struct memalloc_record *rec, *n;
371 struct vpu_mem_desc vpu_mem;
373 ret = copy_from_user(&vpu_mem,
374 (struct vpu_mem_desc *)arg,
375 sizeof(struct vpu_mem_desc));
379 pr_debug("[FREE] mem freed cpu_addr = 0x%x\n",
381 if ((void *)vpu_mem.cpu_addr != NULL)
382 vpu_free_dma_buffer(&vpu_mem);
/* Matching record is looked up by cpu_addr. */
384 mutex_lock(&vpu_data.lock);
385 list_for_each_entry_safe(rec, n, &head, list) {
386 if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
387 /* delete from list */
388 list_del(&rec->list);
393 mutex_unlock(&vpu_data.lock);
/* Block (interruptibly) until the ISR path reports a codec interrupt
 * or the user-supplied timeout (milliseconds in arg) expires. */
397 case VPU_IOC_WAIT4INT:
399 u_long timeout = (u_long) arg;
400 if (!wait_event_interruptible_timeout
401 (vpu_queue, irq_status != 0,
402 msecs_to_jiffies(timeout))) {
403 printk(KERN_WARNING "VPU blocking: timeout.\n");
405 } else if (signal_pending(current)) {
407 "VPU interrupt received.\n");
/* Report the IRAM window reserved at probe time. */
413 case VPU_IOC_IRAM_SETTING:
415 ret = copy_to_user((void __user *)arg, &iram,
416 sizeof(struct iram_setting));
/* Gate the VPU clock on/off at user request; clk_cnt_from_ioc tracks
 * the enable count so release/suspend can rebalance it later. */
422 case VPU_IOC_CLKGATE_SETTING:
426 if (get_user(clkgate_en, (u32 __user *) arg))
430 clk_prepare(vpu_clk);
432 atomic_inc(&clk_cnt_from_ioc);
434 clk_disable(vpu_clk);
435 clk_unprepare(vpu_clk);
436 atomic_dec(&clk_cnt_from_ioc);
/* Lazily allocate (first caller) or report the shared DMA memory. */
441 case VPU_IOC_GET_SHARE_MEM:
443 mutex_lock(&vpu_data.lock);
444 if (share_mem.cpu_addr != 0) {
445 ret = copy_to_user((void __user *)arg,
447 sizeof(struct vpu_mem_desc));
448 mutex_unlock(&vpu_data.lock);
451 if (copy_from_user(&share_mem,
452 (struct vpu_mem_desc *)arg,
453 sizeof(struct vpu_mem_desc))) {
454 mutex_unlock(&vpu_data.lock);
457 if (vpu_alloc_dma_buffer(&share_mem) == -1)
460 if (copy_to_user((void __user *)arg,
467 mutex_unlock(&vpu_data.lock);
/* Like GET_SHARE_MEM, but backed by vmalloc_user() memory that is
 * later mapped through remap_vmalloc_range() (see vpu_map_vshare_mem). */
470 case VPU_IOC_REQ_VSHARE_MEM:
472 mutex_lock(&vpu_data.lock);
473 if (vshare_mem.cpu_addr != 0) {
474 ret = copy_to_user((void __user *)arg,
476 sizeof(struct vpu_mem_desc));
477 mutex_unlock(&vpu_data.lock);
480 if (copy_from_user(&vshare_mem,
481 (struct vpu_mem_desc *)arg,
484 mutex_unlock(&vpu_data.lock);
487 /* vmalloc shared memory if not allocated */
488 if (!vshare_mem.cpu_addr)
489 vshare_mem.cpu_addr =
491 vmalloc_user(vshare_mem.size);
493 ((void __user *)arg, &vshare_mem,
494 sizeof(struct vpu_mem_desc)))
497 mutex_unlock(&vpu_data.lock);
/* Lazily allocate or report the VPU bitstream working buffer. */
500 case VPU_IOC_GET_WORK_ADDR:
502 if (bitwork_mem.cpu_addr != 0) {
504 copy_to_user((void __user *)arg,
506 sizeof(struct vpu_mem_desc));
509 if (copy_from_user(&bitwork_mem,
510 (struct vpu_mem_desc *)arg,
511 sizeof(struct vpu_mem_desc)))
514 if (vpu_alloc_dma_buffer(&bitwork_mem) == -1)
516 else if (copy_to_user((void __user *)arg,
525 * The following two ioctls are used when the user allocates the working
526 * buffer and registers it with the vpu driver.
528 case VPU_IOC_QUERY_BITWORK_MEM:
530 if (copy_to_user((void __user *)arg,
532 sizeof(struct vpu_mem_desc)))
536 case VPU_IOC_SET_BITWORK_MEM:
538 if (copy_from_user(&bitwork_mem,
539 (struct vpu_mem_desc *)arg,
540 sizeof(struct vpu_mem_desc)))
544 case VPU_IOC_SYS_SW_RESET:
546 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
555 case VPU_IOC_REG_DUMP:
557 case VPU_IOC_PHYMEM_DUMP:
/* Validate a user-supplied physical address via
 * vpu_is_valid_phy_memory(); the boolean result travels back in .size. */
559 case VPU_IOC_PHYMEM_CHECK:
561 struct vpu_mem_desc check_memory;
562 ret = copy_from_user(&check_memory,
564 sizeof(struct vpu_mem_desc));
566 printk(KERN_ERR "copy from user failure:%d\n", ret);
570 ret = vpu_is_valid_phy_memory((u32)check_memory.phy_addr);
572 pr_debug("vpu: memory phy:0x%x %s phy memory\n",
573 check_memory.phy_addr, (ret ? "is" : "isn't"));
574 /* borrow .size to pass back the result. */
575 check_memory.size = ret;
576 ret = copy_to_user((void __user *)arg, &check_memory,
577 sizeof(struct vpu_mem_desc));
/* Take/drop the driver mutex on behalf of user space. */
584 case VPU_IOC_LOCK_DEV:
588 if (get_user(lock_en, (u32 __user *) arg))
592 mutex_lock(&vpu_data.lock);
594 mutex_unlock(&vpu_data.lock);
600 printk(KERN_ERR "No such IOCTL, cmd is %d\n", cmd);
609 * @brief Release function for vpu file operation
610 * @return 0 on success or negative error code on error
612 static int vpu_release(struct inode *inode, struct file *filp)
615 unsigned long timeout;
617 mutex_lock(&vpu_data.lock);
/* Last close tears the device down: wait for idle, flush deferred
 * work, free shared buffers, rebalance clocks, and power off. */
619 if (open_count > 0 && !(--open_count)) {
621 /* Wait for vpu go to idle state */
622 clk_prepare(vpu_clk);
/* Non-zero PC means firmware is loaded and may still be running. */
624 if (READ_REG(BIT_CUR_PC)) {
626 timeout = jiffies + HZ;
627 while (READ_REG(BIT_BUSY_FLAG)) {
629 if (time_after(jiffies, timeout)) {
630 printk(KERN_WARNING "VPU timeout during release\n");
634 clk_disable(vpu_clk);
635 clk_unprepare(vpu_clk);
637 /* Clean up interrupt */
638 cancel_work_sync(&vpu_data.work);
639 flush_workqueue(vpu_data.workqueue);
642 clk_prepare(vpu_clk);
/* If the VPU is still busy we must not gate/power it off. */
644 if (READ_REG(BIT_BUSY_FLAG)) {
646 if (cpu_is_mx51() || cpu_is_mx53()) {
648 "fatal error: can't gate/power off when VPU is busy\n");
649 clk_disable(vpu_clk);
650 clk_unprepare(vpu_clk);
651 mutex_unlock(&vpu_data.lock);
655 #ifdef CONFIG_SOC_IMX6Q
/* NOTE(review): 0x10F0/0x10F4 appear to be a firmware sleep/ack
 * handshake (write 0x11, expect 0x77) — confirm against the i.MX6
 * VPU reference manual. */
656 if (cpu_is_mx6dl() || cpu_is_mx6q()) {
657 WRITE_REG(0x11, 0x10F0);
658 timeout = jiffies + HZ;
659 while (READ_REG(0x10F4) != 0x77) {
661 if (time_after(jiffies, timeout))
665 if (READ_REG(0x10F4) != 0x77) {
667 "fatal error: can't gate/power off when VPU is busy\n");
668 WRITE_REG(0x0, 0x10F0);
669 clk_disable(vpu_clk);
670 clk_unprepare(vpu_clk);
671 mutex_unlock(&vpu_data.lock);
674 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
685 clk_disable(vpu_clk);
686 clk_unprepare(vpu_clk);
690 /* Free shared memory when vpu device is idle */
691 vpu_free_dma_buffer(&share_mem);
692 share_mem.cpu_addr = 0;
693 vfree((void *)vshare_mem.cpu_addr);
694 vshare_mem.cpu_addr = 0;
/* Drop any clock enables user space left behind via the
 * CLKGATE_SETTING ioctl. */
696 vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
697 for (i = 0; i < vpu_clk_usercount; i++) {
698 clk_disable(vpu_clk);
699 clk_unprepare(vpu_clk);
700 atomic_dec(&clk_cnt_from_ioc);
703 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
704 if (!IS_ERR(vpu_regulator))
705 regulator_disable(vpu_regulator);
707 imx_gpc_power_up_pu(false);
708 pm_runtime_put_sync_suspend(&vpu_pdev->dev);
712 mutex_unlock(&vpu_data.lock);
718 * @brief fasync function for vpu file operation
719 * @return 0 on success or negative error code on error
721 static int vpu_fasync(int fd, struct file *filp, int mode)
723 struct vpu_priv *dev = (struct vpu_priv *)filp->private_data;
/* Register/unregister this file on the SIGIO notification queue used
 * by vpu_worker_callback(). */
724 return fasync_helper(fd, filp, mode, &dev->async_queue);
728 * @brief memory map function of hardware registers for vpu file operation
729 * @return 0 on success or negative error code on error
731 static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
735 vm->vm_flags |= VM_IO | VM_RESERVED;
737 * The vpu registers were already mapped with ioremap() at probe time
738 * (with L_PTE_XN set), and the same physical address must always be
739 * mapped with the same memory type, so L_PTE_XN is set here as well.
740 * Otherwise the video codec may behave unexpectedly.
742 vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
743 pfn = phy_vpu_base_addr >> PAGE_SHIFT;
744 pr_debug("size=0x%x, page no.=0x%x\n",
745 (int)(vm->vm_end - vm->vm_start), (int)pfn);
/* remap_pfn_range() returns non-zero on failure. */
746 return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end - vm->vm_start,
747 vm->vm_page_prot) ? -EAGAIN : 0;
751 * @brief memory map function of memory for vpu file operation
752 * @return 0 on success or negative error code on error
754 static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
757 request_size = vm->vm_end - vm->vm_start;
759 pr_debug(" start=0x%x, pgoff=0x%x, size=0x%x\n",
760 (unsigned int)(vm->vm_start), (unsigned int)(vm->vm_pgoff),
763 vm->vm_flags |= VM_IO | VM_RESERVED;
/* Write-combined mapping for DMA buffers handed to user space;
 * vm_pgoff carries the target page frame number. */
764 vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
766 return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
767 request_size, vm->vm_page_prot) ? -EAGAIN : 0;
772 * @brief memory map function of vmalloced share memory
773 * @return 0 on success or negative error code on error
775 static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
/* vm_pgoff encodes the vmalloc'ed kernel address of vshare_mem
 * (see the dispatch in vpu_mmap()). */
779 ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
780 vm->vm_flags |= VM_IO;
785 * @brief memory map interface for vpu file operation
786 * @return 0 on success or negative error code on error
788 static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
790 unsigned long offset;
792 offset = vshare_mem.cpu_addr >> PAGE_SHIFT;
/* Dispatch by page offset: the vshare page frame maps the vmalloc'ed
 * shared memory, any other non-zero offset maps a DMA buffer, and
 * offset zero maps the VPU hardware registers. */
794 if (vm->vm_pgoff && (vm->vm_pgoff == offset))
795 return vpu_map_vshare_mem(fp, vm);
796 else if (vm->vm_pgoff)
797 return vpu_map_dma_mem(fp, vm);
799 return vpu_map_hwregs(fp, vm);
802 const struct file_operations vpu_fops = {
803 .owner = THIS_MODULE,
805 .unlocked_ioctl = vpu_ioctl,
806 .release = vpu_release,
807 .fasync = vpu_fasync,
812 * This function is called by the driver framework to initialize the vpu device.
813 * @param dev The device structure for the vpu passed in by the framework.
814 * @return 0 on success or negative error code on error
816 static int vpu_dev_probe(struct platform_device *pdev)
819 struct device *temp_class;
820 struct resource *res;
821 unsigned long addr = 0;
/* Reserve an on-chip IRAM window for the codec: sized from the
 * devicetree "iramsize" property on 3.5+ kernels, or from platform
 * data on older kernels. */
823 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
824 struct device_node *np = pdev->dev.of_node;
827 err = of_property_read_u32(np, "iramsize", (u32 *)&iramsize);
828 if (!err && iramsize)
829 iram_alloc(iramsize, &addr);
831 iram.start = iram.end = 0;
834 iram.end = addr + iramsize - 1;
840 vpu_plat = pdev->dev.platform_data;
842 if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
843 iram_alloc(vpu_plat->iram_size, &addr);
845 iram.start = iram.end = 0;
848 iram.end = addr + vpu_plat->iram_size - 1;
/* Map the VPU register block and remember its physical base for
 * vpu_map_hwregs(). */
852 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
854 printk(KERN_ERR "vpu: unable to get vpu base addr\n");
857 phy_vpu_base_addr = res->start;
858 vpu_base = ioremap(res->start, res->end - res->start);
/* Register the character device and create /dev node via the class. */
860 vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
862 printk(KERN_ERR "vpu: unable to get a major for VPU\n");
867 vpu_class = class_create(THIS_MODULE, "mxc_vpu");
868 if (IS_ERR(vpu_class)) {
869 err = PTR_ERR(vpu_class);
873 temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
875 if (IS_ERR(temp_class)) {
876 err = PTR_ERR(temp_class);
880 vpu_clk = clk_get(&pdev->dev, "vpu_clk");
881 if (IS_ERR(vpu_clk)) {
/* Hook the codec completion interrupt. */
886 vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
887 if (vpu_ipi_irq < 0) {
888 printk(KERN_ERR "vpu: unable to get vpu interrupt\n");
892 err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
893 (void *)(&vpu_data));
896 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
897 vpu_regulator = regulator_get(NULL, "cpu_vddvpu");
898 if (IS_ERR(vpu_regulator)) {
899 if (!(cpu_is_mx51() || cpu_is_mx53())) {
901 "%s: failed to get vpu regulator\n", __func__);
904 /* regulator_get will return error on MX5x,
905 * just ignore it everywhere*/
907 "%s: failed to get vpu regulator\n", __func__);
/* Optional JPEG unit interrupt (i.MX6 only). */
912 #ifdef MXC_VPU_HAS_JPU
913 vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
914 if (vpu_jpu_irq < 0) {
915 printk(KERN_ERR "vpu: unable to get vpu jpu interrupt\n");
917 free_irq(vpu_ipi_irq, &vpu_data);
920 err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
921 "VPU_JPG_IRQ", (void *)(&vpu_data));
923 free_irq(vpu_ipi_irq, &vpu_data);
928 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
929 pm_runtime_enable(&pdev->dev);
/* Deferred-work machinery used by the interrupt handlers. */
932 vpu_data.workqueue = create_workqueue("vpu_wq");
933 INIT_WORK(&vpu_data.work, vpu_worker_callback);
934 mutex_init(&vpu_data.lock);
935 printk(KERN_INFO "VPU initialized\n");
/* Error unwind: undo device/class/chrdev registration. */
939 device_destroy(vpu_class, MKDEV(vpu_major, 0));
940 class_destroy(vpu_class);
942 unregister_chrdev(vpu_major, "mxc_vpu");
949 static int vpu_dev_remove(struct platform_device *pdev)
951 free_irq(vpu_ipi_irq, &vpu_data);
952 #ifdef MXC_VPU_HAS_JPU
953 free_irq(vpu_jpu_irq, &vpu_data);
/* Drain and destroy the deferred-work machinery set up in probe. */
955 cancel_work_sync(&vpu_data.work);
956 flush_workqueue(vpu_data.workqueue);
957 destroy_workqueue(vpu_data.workqueue);
/* Return the IRAM window reserved in probe. */
960 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
962 iram_free(iram.start, iram.end-iram.start+1);
964 if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
965 iram_free(iram.start, vpu_plat->iram_size);
968 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
969 if (!IS_ERR(vpu_regulator))
970 regulator_put(vpu_regulator);
976 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
977 static int vpu_suspend(struct device *dev)
979 static int vpu_suspend(struct platform_device *pdev, pm_message_t state)
983 unsigned long timeout;
985 mutex_lock(&vpu_data.lock);
986 if (open_count == 0) {
987 /* VPU is released (all instances are freed),
988 * clock is already off, context is no longer needed,
989 * power is already off on MX6,
990 * gate power on MX51 */
992 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
998 /* Wait up to ~1 second for the vpu to reach the idle state;
999 it is suspect if it has not gone idle by then. */
1000 timeout = jiffies + HZ;
1001 clk_prepare(vpu_clk);
1002 clk_enable(vpu_clk);
1003 while (READ_REG(BIT_BUSY_FLAG)) {
1005 if (time_after(jiffies, timeout)) {
1006 clk_disable(vpu_clk);
1007 clk_unprepare(vpu_clk);
1008 mutex_unlock(&vpu_data.lock);
1012 clk_disable(vpu_clk);
1013 clk_unprepare(vpu_clk);
1015 /* Make sure clock is disabled before suspend */
1016 vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
1017 for (i = 0; i < vpu_clk_usercount; i++) {
1018 clk_disable(vpu_clk);
1019 clk_unprepare(vpu_clk);
/* MX53 keeps its context across suspend; nothing more to save. */
1022 if (cpu_is_mx53()) {
1023 mutex_unlock(&vpu_data.lock);
/* Snapshot the firmware context so vpu_resume() can restore it:
 * 64 registers starting at BIT_CODE_BUF_ADDR plus the current PC. */
1027 if (bitwork_mem.cpu_addr != 0) {
1028 clk_prepare(vpu_clk);
1029 clk_enable(vpu_clk);
1030 /* Save 64 registers from BIT_CODE_BUF_ADDR */
1031 for (i = 0; i < 64; i++)
1032 regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
1033 pc_before_suspend = READ_REG(BIT_CUR_PC);
1034 clk_disable(vpu_clk);
1035 clk_unprepare(vpu_clk);
1038 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1043 /* If VPU is working before suspend, disable
1044 * regulator to make usecount right. */
1045 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1046 if (!IS_ERR(vpu_regulator))
1047 regulator_disable(vpu_regulator);
1049 imx_gpc_power_up_pu(false);
1053 mutex_unlock(&vpu_data.lock);
1057 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1058 static int vpu_resume(struct device *dev)
1060 static int vpu_resume(struct platform_device *pdev)
1065 mutex_lock(&vpu_data.lock);
1066 if (open_count == 0) {
1067 /* VPU is released (all instances are freed),
1068 * clock should be kept off, context is no longer needed,
1069 * power should be kept off on MX6,
1070 * disable power gating on MX51 */
1071 if (cpu_is_mx51()) {
1072 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1081 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1082 /* If VPU is working before suspend, enable
1083 * regulator to make usecount right. */
1084 if (!IS_ERR(vpu_regulator))
1085 regulator_enable(vpu_regulator);
1090 imx_gpc_power_up_pu(true);
/* Restore the firmware context saved by vpu_suspend(): registers,
 * then the boot code, then restart execution if it was running. */
1093 if (bitwork_mem.cpu_addr != 0) {
1094 u32 *p = (u32 *) bitwork_mem.cpu_addr;
1099 clk_prepare(vpu_clk);
1100 clk_enable(vpu_clk);
/* A non-zero PC here means the VPU kept power across suspend and
 * needs no restore. */
1102 pc = READ_REG(BIT_CUR_PC);
1104 printk(KERN_WARNING "Not power off after suspend (PC=0x%x)\n", pc);
1105 clk_disable(vpu_clk);
1106 clk_unprepare(vpu_clk);
1110 /* Restore registers */
1111 for (i = 0; i < 64; i++)
1112 WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));
1114 WRITE_REG(0x0, BIT_RESET_CTRL);
1115 WRITE_REG(0x0, BIT_CODE_RUN);
1116 /* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
1117 #ifdef CONFIG_SOC_IMX6Q
1118 WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
1122 * Re-load boot code, from the codebuffer in external RAM.
1123 * Thankfully, we only need 4096 bytes, same for all platforms.
/* Each 32-bit code word is split into two 16-bit halves and pushed
 * through BIT_CODE_DOWN tagged with its target index. */
1125 for (i = 0; i < 2048; i += 4) {
1126 data = p[(i / 2) + 1];
1127 data_hi = (data >> 16) & 0xFFFF;
1128 data_lo = data & 0xFFFF;
1129 WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
1130 WRITE_REG(((i + 1) << 16) | data_lo,
1134 data_hi = (data >> 16) & 0xFFFF;
1135 data_lo = data & 0xFFFF;
1136 WRITE_REG(((i + 2) << 16) | data_hi,
1138 WRITE_REG(((i + 3) << 16) | data_lo,
/* Restart the firmware only if it was running before suspend. */
1142 if (pc_before_suspend) {
1143 WRITE_REG(0x1, BIT_BUSY_FLAG);
1144 WRITE_REG(0x1, BIT_CODE_RUN);
1145 while (READ_REG(BIT_BUSY_FLAG))
1148 printk(KERN_WARNING "PC=0 before suspend\n");
1150 clk_disable(vpu_clk);
1151 clk_unprepare(vpu_clk);
/* Re-apply the clock enables user space held before suspend. */
1155 /* Recover vpu clock */
1156 for (i = 0; i < vpu_clk_usercount; i++) {
1157 clk_prepare(vpu_clk);
1158 clk_enable(vpu_clk);
1162 mutex_unlock(&vpu_data.lock);
1166 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1167 static int vpu_runtime_suspend(struct device *dev)
/* Runtime PM: the VPU only needs the high bus frequency while active. */
1169 release_bus_freq(BUS_FREQ_HIGH);
1173 static int vpu_runtime_resume(struct device *dev)
1175 request_bus_freq(BUS_FREQ_HIGH);
1179 static const struct dev_pm_ops vpu_pm_ops = {
1180 SET_RUNTIME_PM_OPS(vpu_runtime_suspend, vpu_runtime_resume, NULL)
1181 SET_SYSTEM_SLEEP_PM_OPS(vpu_suspend, vpu_resume)
1186 #define vpu_suspend NULL
1187 #define vpu_resume NULL
1188 #endif /* !CONFIG_PM */
1190 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1191 static const struct of_device_id vpu_of_match[] = {
1192 { .compatible = "fsl,imx6-vpu", },
1195 MODULE_DEVICE_TABLE(of, vpu_of_match);
1198 /*! Driver definition
1201 static struct platform_driver mxcvpu_driver = {
1204 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1205 .of_match_table = vpu_of_match,
1211 .probe = vpu_dev_probe,
1212 .remove = vpu_dev_remove,
1213 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1214 .suspend = vpu_suspend,
1215 .resume = vpu_resume,
1219 static int __init vpu_init(void)
1221 int ret = platform_driver_register(&mxcvpu_driver);
1223 init_waitqueue_head(&vpu_queue);
1226 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
/* Cache the end of DRAM so vpu_is_valid_phy_memory() can range-check
 * user-supplied physical addresses. */
1228 top_address_DRAM = memblock_end_of_DRAM_with_reserved();
1234 static void __exit vpu_exit(void)
/* Tear down the char device/class created in probe. */
1236 if (vpu_major > 0) {
1237 device_destroy(vpu_class, MKDEV(vpu_major, 0));
1238 class_destroy(vpu_class);
1239 unregister_chrdev(vpu_major, "mxc_vpu");
/* Release any driver-owned DMA buffers that are still around. */
1243 vpu_free_dma_buffer(&bitwork_mem);
1244 vpu_free_dma_buffer(&pic_para_mem);
1245 vpu_free_dma_buffer(&user_data_mem);
1247 /* reset VPU state */
/* Pre-3.5: power/clock up briefly and use the platform reset hook;
 * 3.5+: use the GPC power domain and the SRC block reset. */
1248 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1249 if (!IS_ERR(vpu_regulator))
1250 regulator_enable(vpu_regulator);
1251 clk_prepare(vpu_clk);
1252 clk_enable(vpu_clk);
1253 if (vpu_plat->reset)
1255 clk_disable(vpu_clk);
1256 clk_unprepare(vpu_clk);
1257 if (!IS_ERR(vpu_regulator))
1258 regulator_disable(vpu_regulator);
1260 imx_gpc_power_up_pu(true);
1261 clk_prepare(vpu_clk);
1262 clk_enable(vpu_clk);
1263 imx_src_reset_vpu();
1264 clk_disable(vpu_clk);
1265 clk_unprepare(vpu_clk);
1266 imx_gpc_power_up_pu(false);
1271 platform_driver_unregister(&mxcvpu_driver);
1275 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1276 MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
1277 MODULE_LICENSE("GPL");
1279 module_init(vpu_init);
1280 module_exit(vpu_exit);