2 * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
17 * @brief VPU system initialization and file operation implementation
22 #include <linux/kernel.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/stat.h>
27 #include <linux/platform_device.h>
28 #include <linux/kdev_t.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/wait.h>
31 #include <linux/list.h>
32 #include <linux/clk.h>
33 #include <linux/delay.h>
34 #include <linux/fsl_devices.h>
35 #include <linux/uaccess.h>
37 #include <linux/slab.h>
38 #include <linux/workqueue.h>
39 #include <linux/sched.h>
40 #include <linux/vmalloc.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/page-flags.h>
43 #include <linux/mm_types.h>
44 #include <linux/types.h>
45 #include <linux/memblock.h>
46 #include <linux/memory.h>
47 #include <linux/version.h>
50 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
51 #include <linux/module.h>
52 #include <linux/pm_runtime.h>
53 #include <linux/sizes.h>
56 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
57 #include <linux/iram_alloc.h>
58 #include <mach/clock.h>
59 #include <mach/hardware.h>
60 #include <mach/mxc_vpu.h>
63 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
64 #include <linux/genalloc.h>
66 #include <linux/reset.h>
67 #include <linux/clk.h>
68 #include <linux/mxc_vpu.h>
69 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
70 #include <mach/busfreq.h>
71 #include <mach/common.h>
73 #include <asm/sizes.h>
76 /* Define one new pgprot which combined uncached and XN(never executable) */
77 #define pgprot_noncachedxn(prot) \
78 __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
/* Per-device private state: fasync notification queue plus the deferred
 * work/workqueue used to signal userspace after a VPU interrupt.
 * NOTE(review): the enclosing struct declaration (struct vpu_priv, per the
 * vpu_data definition below) is elided in this listing. */
81 struct fasync_struct *async_queue;
82 struct work_struct work;
83 struct workqueue_struct *workqueue;
87 /* To track the allocated memory buffer */
88 struct memalloc_record {
89 struct list_head list;
90 struct vpu_mem_desc mem;
/* On >= 3.10 kernels the on-chip IRAM comes from a genalloc pool. */
98 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
99 static struct gen_pool *iram_pool;
100 static u32 iram_base;
/* List of all DMA buffers handed out via VPU_IOC_PHYMEM_ALLOC. */
103 static LIST_HEAD(head);
105 static int vpu_major;
106 static int vpu_clk_usercount;
107 static struct class *vpu_class;
108 static struct vpu_priv vpu_data;
109 static u8 open_count;
110 static struct clk *vpu_clk;
/* Persistent VPU buffers; bitwork holds the firmware/boot code image. */
111 static struct vpu_mem_desc bitwork_mem = { 0 };
112 static struct vpu_mem_desc pic_para_mem = { 0 };
113 static struct vpu_mem_desc user_data_mem = { 0 };
114 static struct vpu_mem_desc share_mem = { 0 };
115 static struct vpu_mem_desc vshare_mem = { 0 };
117 static void __iomem *vpu_base;
118 static int vpu_ipi_irq;
119 static u32 phy_vpu_base_addr;
120 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
121 static phys_addr_t top_address_DRAM;
122 static struct mxc_vpu_platform_data *vpu_plat;
125 static struct device *vpu_dev;
128 static struct iram_setting iram;
130 /* implement the blocking ioctl */
131 static int irq_status;
132 static int codec_done;
133 static wait_queue_head_t vpu_queue;
135 #ifdef CONFIG_SOC_IMX6Q
136 #define MXC_VPU_HAS_JPU
139 #ifdef MXC_VPU_HAS_JPU
140 static int vpu_jpu_irq;
/* Register backup area used across suspend/resume (see vpu_suspend). */
144 static unsigned int regBk[64];
145 static unsigned int pc_before_suspend;
147 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
148 static struct regulator *vpu_regulator;
/* Balance counter for clock enables requested via VPU_IOC_CLKGATE_SETTING. */
150 static atomic_t clk_cnt_from_ioc = ATOMIC_INIT(0);
152 #define READ_REG(x) readl_relaxed(vpu_base + x)
153 #define WRITE_REG(val, x) writel_relaxed(val, vpu_base + x)
155 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
156 /* redirect to static functions */
/* Device-tree based replacement for the old mach-level cpu_is_mx6dl()
 * helper on >= 3.5 kernels.  NOTE(review): variable declaration and
 * return statement are elided in this listing. */
157 static int cpu_is_mx6dl(void)
160 ret = of_machine_is_compatible("fsl,imx6dl");
/* Device-tree based replacement for cpu_is_mx6q() on >= 3.5 kernels.
 * NOTE(review): declaration/return lines elided in this listing. */
164 static int cpu_is_mx6q(void)
167 ret = of_machine_is_compatible("fsl,imx6q");
/* Hard-reset the VPU block.  On >= 3.10 this goes through the generic
 * reset-controller framework (device_reset); the 3.5..3.10 path is
 * elided in this listing. */
172 static void vpu_reset(void)
174 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
175 device_reset(vpu_dev);
176 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
/* Acquire (on = true) or release (on = false) the VPU power regulator.
 * Pre-3.5 uses the "cpu_vddvpu" supply with plain regulator_get/put;
 * >= 3.10 uses the devm-managed "pu" supply (no explicit put needed).
 * NOTE(review): IS_ERR() is used as the error result here, so the
 * function returns nonzero when the regulator lookup failed. */
184 static long vpu_power_get(bool on)
189 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
190 vpu_regulator = regulator_get(NULL, "cpu_vddvpu");
191 ret = IS_ERR(vpu_regulator);
192 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
193 vpu_regulator = devm_regulator_get(vpu_dev, "pu");
194 ret = IS_ERR(vpu_regulator);
197 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
198 if (!IS_ERR(vpu_regulator))
199 regulator_put(vpu_regulator);
/* Enable (on = true) or disable (on = false) VPU power.  On kernels
 * with a regulator handle this toggles vpu_regulator; the 3.5..3.10
 * path instead calls imx_gpc_power_up_pu().  Errors are only logged —
 * callers are not informed of a failed power transition. */
205 static void vpu_power_up(bool on)
207 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
211 if (!IS_ERR(vpu_regulator)) {
212 ret = regulator_enable(vpu_regulator);
214 dev_err(vpu_dev, "failed to power up vpu\n");
217 if (!IS_ERR(vpu_regulator)) {
218 ret = regulator_disable(vpu_regulator);
220 dev_err(vpu_dev, "failed to power down vpu\n");
224 imx_gpc_power_up_pu(on);
/* >= 3.10 compatibility shims: the mach-level busfreq API and MX5x
 * cpu_is_* helpers no longer exist, so provide no-op/false stubs and
 * neutral defines so the shared code below still compiles.
 * NOTE(review): stub bodies are elided in this listing. */
228 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
229 static void request_bus_freq(int freq)
233 static void release_bus_freq(int freq)
237 static int cpu_is_mx53(void)
242 static int cpu_is_mx51(void)
247 #define VM_RESERVED 0
248 #define BUS_FREQ_HIGH 0
253 * Private function to alloc dma buffer
254 * @return status 0 success.
/* Allocate a coherent DMA buffer of mem->size bytes, filling in both
 * mem->cpu_addr (kernel virtual, stored as unsigned long) and
 * mem->phy_addr.  Returns 0 on success; the failure return value is
 * elided in this listing.  NOTE(review): dev argument to
 * dma_alloc_coherent is NULL — allocation is not tied to vpu_dev. */
256 static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
258 mem->cpu_addr = (unsigned long)
259 dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
260 (dma_addr_t *) (&mem->phy_addr),
261 GFP_DMA | GFP_KERNEL)
262 dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = 0x%x\n", mem->cpu_addr);
263 if ((void *)(mem->cpu_addr) == NULL) {
264 dev_err(vpu_dev, "Physical memory allocation error!\n");
271 * Private function to free dma buffer
/* Release a buffer obtained from vpu_alloc_dma_buffer().  A zero
 * cpu_addr marks an unallocated descriptor and is skipped. */
273 static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
275 if (mem->cpu_addr != 0) {
276 dma_free_coherent(0, PAGE_ALIGN(mem->size),
277 (void *)mem->cpu_addr, mem->phy_addr);
282 * Private function to free buffers
283 * @return status 0 success.
/* Free every buffer still registered on the global allocation list
 * (`head`) and unlink its record.  NOTE(review): the copy of rec->mem
 * into the local `mem`, the kfree of rec, and the locking context are
 * elided in this listing — presumably the caller holds vpu_data.lock. */
285 static int vpu_free_buffers(void)
287 struct memalloc_record *rec, *n;
288 struct vpu_mem_desc mem;
290 list_for_each_entry_safe(rec, n, &head, list) {
292 if (mem.cpu_addr != 0) {
293 vpu_free_dma_buffer(&mem);
294 dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
295 /* delete from list */
296 list_del(&rec->list);
/* Workqueue callback scheduled from the interrupt handlers: notify any
 * fasync subscribers (SIGIO/POLL_IN) that a VPU event occurred. */
304 static inline void vpu_worker_callback(struct work_struct *w)
306 struct vpu_priv *dev = container_of(w, struct vpu_priv,
309 if (dev->async_queue)
310 kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
314 * Clock is gated on when dec/enc started, gate it off when
/* Wake any thread blocked in VPU_IOC_WAIT4INT.  NOTE(review): per the
 * original numbering this line belongs to a later (elided) section,
 * not necessarily to vpu_worker_callback itself — confirm placement. */
320 wake_up_interruptible(&vpu_queue);
324 * @brief vpu interrupt handler
/* Main VPU interrupt: read the interrupt reason, ack the interrupt by
 * writing BIT_INT_CLEAR, and defer userspace notification to the
 * workqueue.  NOTE(review): the lines updating irq_status/codec_done
 * and the IRQ_HANDLED return are elided in this listing. */
326 static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
328 struct vpu_priv *dev = dev_id;
331 reg = READ_REG(BIT_INT_REASON);
334 WRITE_REG(0x1, BIT_INT_CLEAR);
336 queue_work(dev->workqueue, &dev->work);
342 * @brief vpu jpu interrupt handler
/* JPEG-unit interrupt (i.MX6 only): read MJPEG_PIC_STATUS_REG and
 * defer notification to the shared workqueue.  NOTE(review): the
 * status-clear and return lines are elided in this listing. */
344 #ifdef MXC_VPU_HAS_JPU
345 static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
347 struct vpu_priv *dev = dev_id;
350 reg = READ_REG(MJPEG_PIC_STATUS_REG);
354 queue_work(dev->workqueue, &dev->work);
361 * @brief check phy memory prepare to pass to vpu is valid or not, we
362 * already address some issue that if pass a wrong address to vpu
363 * (like virtual address), system will hang.
365 * @return true return is a valid phy memory address, false return not.
/* Sanity-check a physical address before handing it to the VPU: an
 * address above the end of DRAM (e.g. a virtual address passed by
 * mistake) would hang the system.  Only enforced pre-3.5, where
 * top_address_DRAM is populated; the fallback return is elided here. */
367 bool vpu_is_valid_phy_memory(u32 paddr)
369 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
370 if (paddr > top_address_DRAM)
378 * @brief open function for vpu file operation
380 * @return 0 on success or negative error code on error
/* open() for /dev/mxc_vpu.  On the first open, resume the device via
 * runtime PM and (on i.MX6) warn if the VPU was left powered (BIT_CUR_PC
 * nonzero) from a previous session.  The clock is enabled only briefly
 * for that register read.  NOTE(review): the clk_enable between
 * clk_prepare and READ_REG is elided in this listing. */
382 static int vpu_open(struct inode *inode, struct file *filp)
385 mutex_lock(&vpu_data.lock);
387 if (open_count++ == 0) {
389 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
390 pm_runtime_get_sync(vpu_dev);
394 #ifdef CONFIG_SOC_IMX6Q
395 clk_prepare(vpu_clk);
397 if (READ_REG(BIT_CUR_PC))
398 dev_dbg(vpu_dev, "Not power off before vpu open!\n");
399 clk_disable(vpu_clk);
400 clk_unprepare(vpu_clk);
404 filp->private_data = (void *)(&vpu_data);
405 mutex_unlock(&vpu_data.lock);
410 * @brief IO ctrl function for vpu file operation
411 * @param cmd IO ctrl command
412 * @return 0 on success or negative error code on error
/* unlocked_ioctl for /dev/mxc_vpu — the main userspace control surface.
 * Each case copies a struct vpu_mem_desc (or a u32) across the user
 * boundary; vpu_data.lock guards the shared descriptors and the global
 * allocation list.  NOTE(review): many error-return and closing-brace
 * lines are elided in this listing. */
414 static long vpu_ioctl(struct file *filp, u_int cmd,
/* Allocate a DMA buffer for userspace and remember it on `head` so it
 * can be reclaimed at release time. */
420 case VPU_IOC_PHYMEM_ALLOC:
422 struct memalloc_record *rec;
424 rec = kzalloc(sizeof(*rec), GFP_KERNEL);
428 ret = copy_from_user(&(rec->mem),
429 (struct vpu_mem_desc *)arg,
430 sizeof(struct vpu_mem_desc));
436 dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
439 ret = vpu_alloc_dma_buffer(&(rec->mem));
443 "Physical memory allocation error!\n");
/* Return the filled-in descriptor (phy_addr/cpu_addr) to the caller. */
446 ret = copy_to_user((void __user *)arg, &(rec->mem),
447 sizeof(struct vpu_mem_desc));
454 mutex_lock(&vpu_data.lock);
455 list_add(&rec->list, &head);
456 mutex_unlock(&vpu_data.lock);
/* Free a previously allocated buffer and drop its tracking record. */
460 case VPU_IOC_PHYMEM_FREE:
462 struct memalloc_record *rec, *n;
463 struct vpu_mem_desc vpu_mem;
465 ret = copy_from_user(&vpu_mem,
466 (struct vpu_mem_desc *)arg,
467 sizeof(struct vpu_mem_desc));
471 dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = 0x%x\n",
473 if ((void *)vpu_mem.cpu_addr != NULL)
474 vpu_free_dma_buffer(&vpu_mem);
476 mutex_lock(&vpu_data.lock);
477 list_for_each_entry_safe(rec, n, &head, list) {
478 if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
479 /* delete from list */
480 list_del(&rec->list);
485 mutex_unlock(&vpu_data.lock);
/* Block until an interrupt arrives (irq_status set by the handlers),
 * the caller-supplied timeout (ms, passed directly in `arg`) elapses,
 * or a signal is delivered. */
489 case VPU_IOC_WAIT4INT:
491 u_long timeout = (u_long) arg;
492 if (!wait_event_interruptible_timeout
493 (vpu_queue, irq_status != 0,
494 msecs_to_jiffies(timeout))) {
495 dev_warn(vpu_dev, "VPU blocking: timeout.\n");
497 } else if (signal_pending(current)) {
498 dev_warn(vpu_dev, "VPU interrupt received.\n");
/* Report the IRAM region reserved at probe time. */
504 case VPU_IOC_IRAM_SETTING:
506 ret = copy_to_user((void __user *)arg, &iram,
507 sizeof(struct iram_setting));
/* Userspace-driven clock gating; clk_cnt_from_ioc keeps the enable/
 * disable count balanced so release/suspend can unwind it. */
513 case VPU_IOC_CLKGATE_SETTING:
517 if (get_user(clkgate_en, (u32 __user *) arg))
521 clk_prepare(vpu_clk);
523 atomic_inc(&clk_cnt_from_ioc);
525 clk_disable(vpu_clk);
526 clk_unprepare(vpu_clk);
527 atomic_dec(&clk_cnt_from_ioc);
/* Lazily allocate the DMA-backed share memory on first request; later
 * callers just get a copy of the existing descriptor. */
532 case VPU_IOC_GET_SHARE_MEM:
534 mutex_lock(&vpu_data.lock);
535 if (share_mem.cpu_addr != 0) {
536 ret = copy_to_user((void __user *)arg,
538 sizeof(struct vpu_mem_desc));
539 mutex_unlock(&vpu_data.lock);
542 if (copy_from_user(&share_mem,
543 (struct vpu_mem_desc *)arg,
544 sizeof(struct vpu_mem_desc))) {
545 mutex_unlock(&vpu_data.lock);
548 if (vpu_alloc_dma_buffer(&share_mem) == -1)
551 if (copy_to_user((void __user *)arg,
558 mutex_unlock(&vpu_data.lock);
/* Same pattern for the vmalloc-backed shared region (mmap'd later via
 * vpu_map_vshare_mem). */
561 case VPU_IOC_REQ_VSHARE_MEM:
563 mutex_lock(&vpu_data.lock);
564 if (vshare_mem.cpu_addr != 0) {
565 ret = copy_to_user((void __user *)arg,
567 sizeof(struct vpu_mem_desc));
568 mutex_unlock(&vpu_data.lock);
571 if (copy_from_user(&vshare_mem,
572 (struct vpu_mem_desc *)arg,
575 mutex_unlock(&vpu_data.lock);
578 /* vmalloc shared memory if not allocated */
579 if (!vshare_mem.cpu_addr)
580 vshare_mem.cpu_addr =
582 vmalloc_user(vshare_mem.size);
584 ((void __user *)arg, &vshare_mem,
585 sizeof(struct vpu_mem_desc)))
588 mutex_unlock(&vpu_data.lock);
/* Hand out (allocating on first use) the firmware working buffer. */
591 case VPU_IOC_GET_WORK_ADDR:
593 if (bitwork_mem.cpu_addr != 0) {
595 copy_to_user((void __user *)arg,
597 sizeof(struct vpu_mem_desc));
600 if (copy_from_user(&bitwork_mem,
601 (struct vpu_mem_desc *)arg,
602 sizeof(struct vpu_mem_desc)))
605 if (vpu_alloc_dma_buffer(&bitwork_mem) == -1)
607 else if (copy_to_user((void __user *)arg,
616 * The following two ioctl is used when user allocates working buffer
617 * and register it to vpu driver.
619 case VPU_IOC_QUERY_BITWORK_MEM:
621 if (copy_to_user((void __user *)arg,
623 sizeof(struct vpu_mem_desc)))
627 case VPU_IOC_SET_BITWORK_MEM:
629 if (copy_from_user(&bitwork_mem,
630 (struct vpu_mem_desc *)arg,
631 sizeof(struct vpu_mem_desc)))
635 case VPU_IOC_SYS_SW_RESET:
640 case VPU_IOC_REG_DUMP:
642 case VPU_IOC_PHYMEM_DUMP:
/* Validate a user-supplied physical address; the result is smuggled
 * back in the descriptor's .size field. */
644 case VPU_IOC_PHYMEM_CHECK:
646 struct vpu_mem_desc check_memory;
647 ret = copy_from_user(&check_memory,
649 sizeof(struct vpu_mem_desc));
651 dev_err(vpu_dev, "copy from user failure:%d\n", ret);
655 ret = vpu_is_valid_phy_memory((u32)check_memory.phy_addr);
657 dev_dbg(vpu_dev, "vpu: memory phy:0x%x %s phy memory\n",
658 check_memory.phy_addr, (ret ? "is" : "isn't"));
659 /* borrow .size to pass back the result. */
660 check_memory.size = ret;
661 ret = copy_to_user((void __user *)arg, &check_memory,
662 sizeof(struct vpu_mem_desc));
/* Lock/unlock the driver mutex on behalf of userspace. */
669 case VPU_IOC_LOCK_DEV:
673 if (get_user(lock_en, (u32 __user *) arg))
677 mutex_lock(&vpu_data.lock);
679 mutex_unlock(&vpu_data.lock);
685 dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
694 * @brief Release function for vpu file operation
695 * @return 0 on success or negative error code on error
/* release() for /dev/mxc_vpu.  On last close: wait (bounded at 1 s via
 * jiffies + HZ) for the VPU to go idle, flush pending work, attempt a
 * graceful gate-off (on MX5x a busy VPU is fatal; on i.MX6 a handshake
 * through registers 0x10F0/0x10F4 is tried first), free shared memory,
 * and unwind any clock enables userspace left via the clkgate ioctl.
 * NOTE(review): clk_enable calls, vpu_free_buffers() and the final
 * return are elided in this listing. */
697 static int vpu_release(struct inode *inode, struct file *filp)
700 unsigned long timeout;
702 mutex_lock(&vpu_data.lock);
704 if (open_count > 0 && !(--open_count)) {
706 /* Wait for vpu go to idle state */
707 clk_prepare(vpu_clk);
709 if (READ_REG(BIT_CUR_PC)) {
711 timeout = jiffies + HZ;
712 while (READ_REG(BIT_BUSY_FLAG)) {
714 if (time_after(jiffies, timeout)) {
715 dev_warn(vpu_dev, "VPU timeout during release\n");
719 clk_disable(vpu_clk);
720 clk_unprepare(vpu_clk);
722 /* Clean up interrupt */
723 cancel_work_sync(&vpu_data.work);
724 flush_workqueue(vpu_data.workqueue);
727 clk_prepare(vpu_clk);
729 if (READ_REG(BIT_BUSY_FLAG)) {
731 if (cpu_is_mx51() || cpu_is_mx53()) {
733 "fatal error: can't gate/power off when VPU is busy\n");
734 clk_disable(vpu_clk);
735 clk_unprepare(vpu_clk);
736 mutex_unlock(&vpu_data.lock);
740 #ifdef CONFIG_SOC_IMX6Q
/* i.MX6 gate-off handshake: write 0x11 to 0x10F0 and poll 0x10F4 for
 * the 0x77 acknowledge, bounded by a 1 s timeout. */
741 if (cpu_is_mx6dl() || cpu_is_mx6q()) {
742 WRITE_REG(0x11, 0x10F0);
743 timeout = jiffies + HZ;
744 while (READ_REG(0x10F4) != 0x77) {
746 if (time_after(jiffies, timeout))
750 if (READ_REG(0x10F4) != 0x77) {
752 "fatal error: can't gate/power off when VPU is busy\n");
753 WRITE_REG(0x0, 0x10F0);
754 clk_disable(vpu_clk);
755 clk_unprepare(vpu_clk);
756 mutex_unlock(&vpu_data.lock);
764 clk_disable(vpu_clk);
765 clk_unprepare(vpu_clk);
769 /* Free shared memory when vpu device is idle */
770 vpu_free_dma_buffer(&share_mem);
771 share_mem.cpu_addr = 0;
772 vfree((void *)vshare_mem.cpu_addr);
773 vshare_mem.cpu_addr = 0;
/* Rebalance clock enables that userspace never turned back off. */
775 vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
776 for (i = 0; i < vpu_clk_usercount; i++) {
777 clk_disable(vpu_clk);
778 clk_unprepare(vpu_clk);
779 atomic_dec(&clk_cnt_from_ioc);
783 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
784 pm_runtime_put_sync_suspend(vpu_dev);
788 mutex_unlock(&vpu_data.lock);
794 * @brief fasync function for vpu file operation
795 * @return 0 on success or negative error code on error
/* fasync() hook: register/unregister the caller on the driver's SIGIO
 * notification queue (signalled from vpu_worker_callback). */
797 static int vpu_fasync(int fd, struct file *filp, int mode)
799 struct vpu_priv *dev = (struct vpu_priv *)filp->private_data;
800 return fasync_helper(fd, filp, mode, &dev->async_queue);
804 * @brief memory map function of harware registers for vpu file operation
805 * @return 0 on success or negative error code on error
/* mmap the VPU register window (phy_vpu_base_addr) into userspace as
 * uncached, non-executable I/O memory. */
807 static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
811 vm->vm_flags |= VM_IO | VM_RESERVED;
813 * Since vpu registers have been mapped with ioremap() at probe
814 * which L_PTE_XN is 1, and the same physical address must be
815 * mapped multiple times with same type, so set L_PTE_XN to 1 here.
816 * Otherwise, there may be unexpected result in video codec.
818 vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
819 pfn = phy_vpu_base_addr >> PAGE_SHIFT;
820 dev_dbg(vpu_dev, "size=0x%x, page no.=0x%x\n",
821 (int)(vm->vm_end - vm->vm_start), (int)pfn);
822 return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end - vm->vm_start,
823 vm->vm_page_prot) ? -EAGAIN : 0;
827 * @brief memory map function of memory for vpu file operation
828 * @return 0 on success or negative error code on error
/* mmap a DMA buffer into userspace.  vm_pgoff carries the target page
 * frame number (the buffer's physical address >> PAGE_SHIFT); mapping
 * is write-combined, not cached. */
830 static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
833 request_size = vm->vm_end - vm->vm_start;
835 dev_dbg(vpu_dev, "start=0x%x, pgoff=0x%x, size=0x%x\n",
836 (unsigned int)(vm->vm_start), (unsigned int)(vm->vm_pgoff),
839 vm->vm_flags |= VM_IO | VM_RESERVED;
840 vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
842 return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
843 request_size, vm->vm_page_prot) ? -EAGAIN : 0;
848 * @brief memory map function of vmalloced share memory
849 * @return 0 on success or negative error code on error
/* mmap the vmalloc-backed shared region; vm_pgoff << PAGE_SHIFT
 * recovers the kernel virtual address chosen in VPU_IOC_REQ_VSHARE_MEM.
 * NOTE(review): the return statement is elided in this listing. */
851 static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
855 ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
856 vm->vm_flags |= VM_IO;
861 * @brief memory map interface for vpu file operation
862 * @return 0 on success or negative error code on error
/* mmap dispatcher: offset matching the vshare region maps that; any
 * other nonzero offset maps DMA memory; offset 0 maps the hardware
 * registers. */
864 static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
866 unsigned long offset;
868 offset = vshare_mem.cpu_addr >> PAGE_SHIFT;
870 if (vm->vm_pgoff && (vm->vm_pgoff == offset))
871 return vpu_map_vshare_mem(fp, vm);
872 else if (vm->vm_pgoff)
873 return vpu_map_dma_mem(fp, vm);
875 return vpu_map_hwregs(fp, vm);
/* Character-device operations for /dev/mxc_vpu.
 * NOTE(review): the .open and .mmap initializers are elided in this
 * listing; vpu_open/vpu_mmap above are presumably wired here. */
878 const struct file_operations vpu_fops = {
879 .owner = THIS_MODULE,
881 .unlocked_ioctl = vpu_ioctl,
882 .release = vpu_release,
883 .fasync = vpu_fasync,
888 * This function is called by the driver framework to initialize the vpu device.
889 * @param dev The device structure for the vpu passed in by the framework.
890 * @return 0 on success or negative error code on error
/* Platform-driver probe: reserve IRAM (genalloc pool on >= 3.10,
 * iram_alloc or platform data before that), map the register window,
 * register the char device and class, acquire clock/regulator, request
 * the codec (and optional JPEG) interrupts, enable runtime PM, and set
 * up the notification workqueue.  The labels at the bottom form the
 * error-unwind chain.  NOTE(review): several error-branch and goto
 * lines are elided in this listing. */
892 static int vpu_dev_probe(struct platform_device *pdev)
895 struct device *temp_class;
896 struct resource *res;
897 unsigned long addr = 0;
899 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
900 struct device_node *np = pdev->dev.of_node;
903 err = of_property_read_u32(np, "iramsize", (u32 *)&iramsize);
904 if (!err && iramsize)
905 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
907 iram_pool = of_get_named_gen_pool(np, "iram", 0);
909 dev_err(&pdev->dev, "iram pool not available\n");
913 iram_base = gen_pool_alloc(iram_pool, iramsize);
915 dev_err(&pdev->dev, "unable to alloc iram\n");
919 addr = gen_pool_virt_to_phys(iram_pool, iram_base);
922 iram_alloc(iramsize, &addr);
/* addr == 0 means no IRAM was reserved; record an empty range. */
925 iram.start = iram.end = 0;
928 iram.end = addr + iramsize - 1;
932 vpu_plat = pdev->dev.platform_data;
934 if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
935 iram_alloc(vpu_plat->iram_size, &addr);
937 iram.start = iram.end = 0;
940 iram.end = addr + vpu_plat->iram_size - 1;
944 vpu_dev = &pdev->dev;
946 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
948 dev_err(vpu_dev, "vpu: unable to get vpu base addr\n");
951 phy_vpu_base_addr = res->start;
952 vpu_base = ioremap(res->start, res->end - res->start);
/* vpu_major is 0 here, so the kernel picks a free major number. */
954 vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
956 dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
961 vpu_class = class_create(THIS_MODULE, "mxc_vpu");
962 if (IS_ERR(vpu_class)) {
963 err = PTR_ERR(vpu_class);
967 temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
969 if (IS_ERR(temp_class)) {
970 err = PTR_ERR(temp_class);
974 vpu_clk = clk_get(&pdev->dev, "vpu_clk");
975 if (IS_ERR(vpu_clk)) {
980 vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
981 if (vpu_ipi_irq < 0) {
982 dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
986 err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
987 (void *)(&vpu_data));
990 if (vpu_power_get(true)) {
991 if (!(cpu_is_mx51() || cpu_is_mx53())) {
992 dev_err(vpu_dev, "failed to get vpu power\n");
995 /* regulator_get will return error on MX5x,
996 * just ignore it everywhere*/
997 dev_warn(vpu_dev, "failed to get vpu power\n");
1001 #ifdef MXC_VPU_HAS_JPU
1002 vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
1003 if (vpu_jpu_irq < 0) {
1004 dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
1006 free_irq(vpu_ipi_irq, &vpu_data);
1009 err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
1010 "VPU_JPG_IRQ", (void *)(&vpu_data));
1012 free_irq(vpu_ipi_irq, &vpu_data);
1017 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1018 pm_runtime_enable(&pdev->dev);
1021 vpu_data.workqueue = create_workqueue("vpu_wq");
1022 INIT_WORK(&vpu_data.work, vpu_worker_callback);
1023 mutex_init(&vpu_data.lock);
1024 dev_info(vpu_dev, "VPU initialized\n");
/* Error-unwind path (labels elided): tear down in reverse order. */
1028 device_destroy(vpu_class, MKDEV(vpu_major, 0));
1029 class_destroy(vpu_class);
1031 unregister_chrdev(vpu_major, "mxc_vpu");
/* Platform-driver remove: disable runtime PM, release interrupts,
 * drain and destroy the workqueue, return the IRAM reservation, and
 * drop the power regulator reference. */
1038 static int vpu_dev_remove(struct platform_device *pdev)
1040 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1041 pm_runtime_disable(&pdev->dev);
1043 free_irq(vpu_ipi_irq, &vpu_data);
1044 #ifdef MXC_VPU_HAS_JPU
1045 free_irq(vpu_jpu_irq, &vpu_data);
1047 cancel_work_sync(&vpu_data.work);
1048 flush_workqueue(vpu_data.workqueue);
1049 destroy_workqueue(vpu_data.workqueue);
1052 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1054 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
1055 gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);
1057 iram_free(iram.start, iram.end-iram.start+1);
1060 if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
1061 iram_free(iram.start, vpu_plat->iram_size);
1064 vpu_power_get(false);
/* System-suspend handler (dev_pm_ops signature on >= 3.5, legacy
 * platform_driver signature before).  If the device is in use: wait up
 * to 1 s for the VPU to idle, unwind userspace clock enables, and —
 * except on MX53, which resumes without context restore — snapshot the
 * 64 registers at BIT_CODE_BUF_ADDR plus the program counter so
 * vpu_resume() can restore them.  NOTE(review): several branch/return
 * lines are elided in this listing. */
1069 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1070 static int vpu_suspend(struct device *dev)
1072 static int vpu_suspend(struct platform_device *pdev, pm_message_t state)
1076 unsigned long timeout;
1078 mutex_lock(&vpu_data.lock);
1079 if (open_count == 0) {
1080 /* VPU is released (all instances are freed),
1081 * clock is already off, context is no longer needed,
1082 * power is already off on MX6,
1083 * gate power on MX51 */
1084 if (cpu_is_mx51()) {
1085 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1091 /* Wait for vpu go to idle state, suspect vpu cannot be changed
1092 to idle state after about 1 sec */
1093 timeout = jiffies + HZ;
1094 clk_prepare(vpu_clk);
1095 clk_enable(vpu_clk);
1096 while (READ_REG(BIT_BUSY_FLAG)) {
1098 if (time_after(jiffies, timeout)) {
1099 clk_disable(vpu_clk);
1100 clk_unprepare(vpu_clk);
1101 mutex_unlock(&vpu_data.lock);
1105 clk_disable(vpu_clk);
1106 clk_unprepare(vpu_clk);
1108 /* Make sure clock is disabled before suspend */
1109 vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
1110 for (i = 0; i < vpu_clk_usercount; i++) {
1111 clk_disable(vpu_clk);
1112 clk_unprepare(vpu_clk);
1115 if (cpu_is_mx53()) {
1116 mutex_unlock(&vpu_data.lock);
1120 if (bitwork_mem.cpu_addr != 0) {
1121 clk_prepare(vpu_clk);
1122 clk_enable(vpu_clk);
1123 /* Save 64 registers from BIT_CODE_BUF_ADDR */
1124 for (i = 0; i < 64; i++)
1125 regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
1126 pc_before_suspend = READ_REG(BIT_CUR_PC);
1127 clk_disable(vpu_clk);
1128 clk_unprepare(vpu_clk);
1131 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1136 /* If VPU is working before suspend, disable
1137 * regulator to make usecount right. */
1138 vpu_power_up(false);
1141 mutex_unlock(&vpu_data.lock);
/* System-resume handler, mirror of vpu_suspend: if the device was in
 * use, restore the saved registers, re-download the 4096-byte boot code
 * from the bitwork buffer into BIT_CODE_DOWN, restart the core if it
 * was running before suspend (pc_before_suspend != 0), and re-enable
 * the clocks userspace had on.  NOTE(review): variable declarations,
 * several branches and the return are elided in this listing. */
1145 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1146 static int vpu_resume(struct device *dev)
1148 static int vpu_resume(struct platform_device *pdev)
1153 mutex_lock(&vpu_data.lock);
1154 if (open_count == 0) {
1155 /* VPU is released (all instances are freed),
1156 * clock should be kept off, context is no longer needed,
1157 * power should be kept off on MX6,
1158 * disable power gating on MX51 */
1159 if (cpu_is_mx51()) {
1160 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1169 /* If VPU is working before suspend, enable
1170 * regulator to make usecount right. */
1172 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1177 if (bitwork_mem.cpu_addr != 0) {
1178 u32 *p = (u32 *) bitwork_mem.cpu_addr;
1183 clk_prepare(vpu_clk);
1184 clk_enable(vpu_clk);
1186 pc = READ_REG(BIT_CUR_PC);
1188 dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);
1189 clk_disable(vpu_clk);
1190 clk_unprepare(vpu_clk);
1194 /* Restore registers */
1195 for (i = 0; i < 64; i++)
1196 WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));
1198 WRITE_REG(0x0, BIT_RESET_CTRL);
1199 WRITE_REG(0x0, BIT_CODE_RUN);
1200 /* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
1201 #ifdef CONFIG_SOC_IMX6Q
1202 WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
1206 * Re-load boot code, from the codebuffer in external RAM.
1207 * Thankfully, we only need 4096 bytes, same for all platforms.
/* Each 32-bit word of the code buffer is split into two 16-bit halves
 * and written to BIT_CODE_DOWN tagged with its target index. */
1209 for (i = 0; i < 2048; i += 4) {
1210 data = p[(i / 2) + 1];
1211 data_hi = (data >> 16) & 0xFFFF;
1212 data_lo = data & 0xFFFF;
1213 WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
1214 WRITE_REG(((i + 1) << 16) | data_lo,
1218 data_hi = (data >> 16) & 0xFFFF;
1219 data_lo = data & 0xFFFF;
1220 WRITE_REG(((i + 2) << 16) | data_hi,
1222 WRITE_REG(((i + 3) << 16) | data_lo,
1226 if (pc_before_suspend) {
1227 WRITE_REG(0x1, BIT_BUSY_FLAG);
1228 WRITE_REG(0x1, BIT_CODE_RUN);
1229 while (READ_REG(BIT_BUSY_FLAG))
1232 dev_warn(vpu_dev, "PC=0 before suspend\n");
1234 clk_disable(vpu_clk);
1235 clk_unprepare(vpu_clk);
1239 /* Recover vpu clock */
1240 for (i = 0; i < vpu_clk_usercount; i++) {
1241 clk_prepare(vpu_clk);
1242 clk_enable(vpu_clk);
1246 mutex_unlock(&vpu_data.lock);
/* Runtime-PM callbacks: track the high-bus-frequency request while the
 * VPU is active (no-op stubs on >= 3.10, see compat shims above).
 * NOTE(review): return statements are elided in this listing. */
1250 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1251 static int vpu_runtime_suspend(struct device *dev)
1253 release_bus_freq(BUS_FREQ_HIGH);
1257 static int vpu_runtime_resume(struct device *dev)
1259 request_bus_freq(BUS_FREQ_HIGH);
/* PM operations table wiring runtime and system-sleep callbacks; when
 * power management is compiled out the legacy hooks become NULL. */
1263 static const struct dev_pm_ops vpu_pm_ops = {
1264 SET_RUNTIME_PM_OPS(vpu_runtime_suspend, vpu_runtime_resume, NULL)
1265 SET_SYSTEM_SLEEP_PM_OPS(vpu_suspend, vpu_resume)
1270 #define vpu_suspend NULL
1271 #define vpu_resume NULL
1272 #endif /* !CONFIG_PM */
/* Device-tree match table: binds this driver to "fsl,imx6-vpu" nodes. */
1274 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1275 static const struct of_device_id vpu_of_match[] = {
1276 { .compatible = "fsl,imx6-vpu", },
1279 MODULE_DEVICE_TABLE(of, vpu_of_match);
1282 /*! Driver definition
/* Platform driver definition.  Legacy suspend/resume hooks are only
 * used pre-3.5; newer kernels go through vpu_pm_ops instead.
 * NOTE(review): the .driver name/pm initializers are elided here. */
1285 static struct platform_driver mxcvpu_driver = {
1288 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
1289 .of_match_table = vpu_of_match,
1295 .probe = vpu_dev_probe,
1296 .remove = vpu_dev_remove,
1297 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1298 .suspend = vpu_suspend,
1299 .resume = vpu_resume,
/* Module init: register the platform driver, set up the WAIT4INT wait
 * queue, and on pre-3.5 kernels record the end of DRAM for the
 * physical-address validity check. */
1303 static int __init vpu_init(void)
1305 int ret = platform_driver_register(&mxcvpu_driver);
1307 init_waitqueue_head(&vpu_queue);
1310 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
1312 top_address_DRAM = memblock_end_of_DRAM_with_reserved();
/* Module exit: tear down the char device/class, free the persistent
 * DMA buffers, briefly clock the VPU to reset its state, power it
 * down, and unregister the platform driver.  NOTE(review): the reset
 * call between clk_enable and clk_disable is elided in this listing. */
1318 static void __exit vpu_exit(void)
1320 if (vpu_major > 0) {
1321 device_destroy(vpu_class, MKDEV(vpu_major, 0));
1322 class_destroy(vpu_class);
1323 unregister_chrdev(vpu_major, "mxc_vpu");
1327 vpu_free_dma_buffer(&bitwork_mem);
1328 vpu_free_dma_buffer(&pic_para_mem);
1329 vpu_free_dma_buffer(&user_data_mem);
1331 /* reset VPU state */
1333 clk_prepare(vpu_clk);
1334 clk_enable(vpu_clk);
1336 clk_disable(vpu_clk);
1337 clk_unprepare(vpu_clk);
1338 vpu_power_up(false);
1342 platform_driver_unregister(&mxcvpu_driver);
/* Module metadata and entry points. */
1346 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1347 MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
1348 MODULE_LICENSE("GPL");
1350 module_init(vpu_init);
1351 module_exit(vpu_exit);