]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
ENGR00274761-1 Upgrade VPU driver for Linux 3.10 kernel
authorHongzhang Yang <Hongzhang.Yang@freescale.com>
Mon, 12 Aug 2013 07:14:23 +0000 (15:14 +0800)
committerLothar Waßmann <LW@KARO-electronics.de>
Wed, 20 Aug 2014 08:06:13 +0000 (10:06 +0200)
Pick files from origin/imx_3.5.7 commit 135bf02a0727ea5ce96
- mxc_vpu.h is picked from arch/arm/plat-mxc/include/mach/
  and put to include/linux/
- drivers/mxc/vpu/Kconfig
- drivers/mxc/vpu/Makefile
- drivers/mxc/vpu/mxc_vpu.c

Signed-off-by: Hongzhang Yang <Hongzhang.Yang@freescale.com>
drivers/mxc/vpu/Kconfig [new file with mode: 0644]
drivers/mxc/vpu/Makefile [new file with mode: 0644]
drivers/mxc/vpu/mxc_vpu.c [new file with mode: 0644]
include/linux/mxc_vpu.h [new file with mode: 0644]

diff --git a/drivers/mxc/vpu/Kconfig b/drivers/mxc/vpu/Kconfig
new file mode 100644 (file)
index 0000000..6562697
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Codec configuration
+#
+
+menu "MXC VPU(Video Processing Unit) support"
+
+config MXC_VPU
+         tristate "Support for MXC VPU(Video Processing Unit)"
+         depends on (ARCH_MX3 || ARCH_MX27 || ARCH_MX37 || ARCH_MX5 || ARCH_MX6)
+         default y
+       ---help---
+         The VPU codec device provides codec function for H.264/MPEG4/H.263,
+         as well as MPEG2/VC-1/DivX on some platforms.
+
+config MXC_VPU_DEBUG
+       bool "MXC VPU debugging"
+       depends on MXC_VPU != n
+       help
+         This is an option for the developers; most people should
+         say N here.  This enables MXC VPU driver debugging.
+
+config MX6_VPU_352M
+       bool "MX6 VPU 352M"
+       depends on MXC_VPU
+       default n
+       help
+        Increase the VPU frequency to 352 MHz. This option disables dynamic
+        bus frequency adjustment, and the lowest CPU setpoint will be 352 MHz.
+        This config is used for special VPU use cases.
+
+endmenu
diff --git a/drivers/mxc/vpu/Makefile b/drivers/mxc/vpu/Makefile
new file mode 100644 (file)
index 0000000..1a821f4
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Makefile for the VPU drivers.
+#
+
+obj-$(CONFIG_MXC_VPU)                  += mxc_vpu.o
+
+# ccflags-y is the kbuild-preferred replacement for the deprecated
+# EXTRA_CFLAGS; the conditional form keeps the ifeq block unnecessary.
+ccflags-$(CONFIG_MXC_VPU_DEBUG) += -DDEBUG
diff --git a/drivers/mxc/vpu/mxc_vpu.c b/drivers/mxc/vpu/mxc_vpu.c
new file mode 100644 (file)
index 0000000..a16c31f
--- /dev/null
@@ -0,0 +1,1280 @@
+/*
+ * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file mxc_vpu.c
+ *
+ * @brief VPU system initialization and file operation implementation
+ *
+ * @ingroup VPU
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/stat.h>
+#include <linux/platform_device.h>
+#include <linux/kdev_t.h>
+#include <linux/dma-mapping.h>
+#include <linux/iram_alloc.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fsl_devices.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/regulator/consumer.h>
+#include <linux/page-flags.h>
+#include <linux/mm_types.h>
+#include <linux/types.h>
+#include <linux/memblock.h>
+#include <linux/memory.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <mach/busfreq.h>
+#include <mach/hardware.h>
+#include <mach/common.h>
+#endif
+#include <asm/page.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+#include <linux/sizes.h>
+#else
+#include <asm/sizes.h>
+#endif
+#include <mach/clock.h>
+#include <mach/hardware.h>
+
+#include <mach/mxc_vpu.h>
+
+/* Define one new pgprot which combined uncached and XN(never executable) */
+#define pgprot_noncachedxn(prot) \
+       __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
+
+/* Driver-wide state shared by every opener of the VPU device node. */
+struct vpu_priv {
+       struct fasync_struct *async_queue;      /* SIGIO subscribers */
+       struct work_struct work;                /* bottom half of the VPU IRQs */
+       struct workqueue_struct *workqueue;     /* runs 'work' */
+       struct mutex lock;      /* guards open_count, the allocation list
+                                * and the shared-memory descriptors */
+};
+
+/* To track the allocated memory buffer */
+struct memalloc_record {
+       struct list_head list;          /* link into the global 'head' list */
+       struct vpu_mem_desc mem;        /* trusted copy of the allocation */
+};
+
+/* Physical address range of the on-chip IRAM handed to the codec. */
+struct iram_setting {
+       u32 start;
+       u32 end;
+};
+
+/* All DMA buffers allocated on behalf of user space; guarded by vpu_data.lock. */
+static LIST_HEAD(head);
+
+static int vpu_major;
+static int vpu_clk_usercount;
+static struct class *vpu_class;
+static struct vpu_priv vpu_data;
+static u8 open_count;          /* number of concurrent openers of /dev/mxc_vpu */
+static struct clk *vpu_clk;
+static struct vpu_mem_desc bitwork_mem = { 0 };
+static struct vpu_mem_desc pic_para_mem = { 0 };
+static struct vpu_mem_desc user_data_mem = { 0 };
+static struct vpu_mem_desc share_mem = { 0 };
+static struct vpu_mem_desc vshare_mem = { 0 };
+
+static void __iomem *vpu_base;         /* ioremapped VPU register window */
+static int vpu_ipi_irq;
+static u32 phy_vpu_base_addr;          /* physical base, exposed via mmap */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+static phys_addr_t top_address_DRAM;
+static struct mxc_vpu_platform_data *vpu_plat;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+static struct platform_device *vpu_pdev;
+#endif
+
+/* IRAM setting */
+static struct iram_setting iram;
+
+/* implement the blocking ioctl */
+static int irq_status;                 /* set by the worker, consumed by WAIT4INT */
+static int codec_done;
+static wait_queue_head_t vpu_queue;
+
+#ifdef CONFIG_SOC_IMX6Q
+#define MXC_VPU_HAS_JPU
+#endif
+
+#ifdef MXC_VPU_HAS_JPU
+static int vpu_jpu_irq;
+#endif
+
+/* Register backup area used across suspend/resume. */
+static unsigned int regBk[64];
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+static struct regulator *vpu_regulator;
+#endif
+static unsigned int pc_before_suspend;
+/* Balance counter for clk enables requested through VPU_IOC_CLKGATE_SETTING. */
+static atomic_t clk_cnt_from_ioc = ATOMIC_INIT(0);
+
+#define        READ_REG(x)             readl_relaxed(vpu_base + x)
+#define        WRITE_REG(val, x)       writel_relaxed(val, vpu_base + x)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+/* redirect to static functions */
+static int cpu_is_mx6dl(void)
+{
+       /* Map the legacy cpu_is_* helper onto the DT compatible string. */
+       return of_machine_is_compatible("fsl,imx6dl");
+}
+
+static int cpu_is_mx6q(void)
+{
+       /* Map the legacy cpu_is_* helper onto the DT compatible string. */
+       return of_machine_is_compatible("fsl,imx6q");
+}
+#endif
+
+/*!
+ * Private function to alloc dma buffer
+ * @param mem  descriptor whose .size is the requested length; .cpu_addr
+ *             and .phy_addr are filled in on success
+ * @return status  0 success, -1 on allocation failure.
+ */
+static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
+{
+       /*
+        * NOTE(review): the (dma_addr_t *) cast assumes phy_addr in
+        * struct vpu_mem_desc has the same size as dma_addr_t — confirm
+        * against mxc_vpu.h. dev == NULL relies on the default DMA mask.
+        */
+       mem->cpu_addr = (unsigned long)
+           dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
+                              (dma_addr_t *) (&mem->phy_addr),
+                              GFP_DMA | GFP_KERNEL);
+       /* NOTE(review): %x vs unsigned long may warn on LPAE — verify */
+       pr_debug("[ALLOC] mem alloc cpu_addr = 0x%x\n", mem->cpu_addr);
+       if ((void *)(mem->cpu_addr) == NULL) {
+               printk(KERN_ERR "Physical memory allocation error!\n");
+               return -1;
+       }
+       return 0;
+}
+
+/*!
+ * Private function to free a DMA buffer previously obtained from
+ * vpu_alloc_dma_buffer(); no-op when the descriptor is empty.
+ */
+static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
+{
+       if (mem->cpu_addr != 0) {
+               /* Use NULL for the device, matching vpu_alloc_dma_buffer() */
+               dma_free_coherent(NULL, PAGE_ALIGN(mem->size),
+                                 (void *)mem->cpu_addr, mem->phy_addr);
+       }
+}
+
+/*!
+ * Private function to free buffers
+ * Walks the global allocation list, releasing every tracked DMA buffer
+ * and its record. Called from vpu_release() with vpu_data.lock held.
+ * @return status  0 success.
+ */
+static int vpu_free_buffers(void)
+{
+       struct memalloc_record *rec, *n;
+       struct vpu_mem_desc mem;
+
+       list_for_each_entry_safe(rec, n, &head, list) {
+               mem = rec->mem;
+               if (mem.cpu_addr != 0) {
+                       vpu_free_dma_buffer(&mem);
+                       pr_debug("[FREE] freed paddr=0x%08X\n", mem.phy_addr);
+                       /* delete from list */
+                       list_del(&rec->list);
+                       kfree(rec);
+               }
+       }
+
+       return 0;
+}
+
+/* Bottom half of the VPU interrupts: notify SIGIO listeners and wake
+ * any thread blocked in VPU_IOC_WAIT4INT. */
+static inline void vpu_worker_callback(struct work_struct *w)
+{
+       struct vpu_priv *dev = container_of(w, struct vpu_priv,
+                               work);
+
+       if (dev->async_queue)
+               kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
+
+       irq_status = 1;
+       /*
+        * NOTE(review): the original comment claimed the clock is gated
+        * off here when the codec finishes, but no clk call is made —
+        * the codec_done flag set by the IRQ handlers is only cleared.
+        */
+       if (codec_done)
+               codec_done = 0;
+
+       wake_up_interruptible(&vpu_queue);
+}
+
+/*!
+ * @brief vpu interrupt handler
+ *
+ * Reads the interrupt reason, acknowledges the interrupt in hardware,
+ * and defers the wakeup/notification to the workqueue.
+ */
+static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
+{
+       struct vpu_priv *dev = dev_id;
+       unsigned long reg;
+
+       reg = READ_REG(BIT_INT_REASON);
+       /* bit 3 marks codec completion — presumably "picture done";
+        * confirm against the VPU firmware interrupt-reason encoding */
+       if (reg & 0x8)
+               codec_done = 1;
+       WRITE_REG(0x1, BIT_INT_CLEAR);
+
+       queue_work(dev->workqueue, &dev->work);
+
+       return IRQ_HANDLED;
+}
+
+/*!
+ * @brief vpu jpu interrupt handler
+ *
+ * JPEG-unit counterpart of vpu_ipi_irq_handler(). NOTE(review): unlike
+ * the IPI handler, no status register is cleared here — presumably the
+ * JPU status is acknowledged elsewhere; confirm against the JPU spec.
+ */
+#ifdef MXC_VPU_HAS_JPU
+static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
+{
+       struct vpu_priv *dev = dev_id;
+       unsigned long reg;
+
+       reg = READ_REG(MJPEG_PIC_STATUS_REG);
+       if (reg & 0x3)
+               codec_done = 1;
+
+       queue_work(dev->workqueue, &dev->work);
+
+       return IRQ_HANDLED;
+}
+#endif
+
+/*!
+ * @brief check phy memory prepare to pass to vpu is valid or not, we
+ * already address some issue that if pass a wrong address to vpu
+ * (like virtual address), system will hang.
+ *
+ * On kernels >= 3.5 no DRAM-top information is available here, so every
+ * address is accepted.
+ *
+ * @return true return is a valid phy memory address, false return not.
+ */
+bool vpu_is_valid_phy_memory(u32 paddr)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+       /* Anything above the top of DRAM cannot back a frame buffer. */
+       if (paddr > top_address_DRAM)
+               return false;
+#endif
+
+       return true;
+}
+
+/*!
+ * @brief open function for vpu file operation
+ *
+ * Powers up the VPU domain on the first open only; subsequent opens
+ * just bump the reference count under vpu_data.lock.
+ *
+ * @return  0 on success or negative error code on error
+ */
+static int vpu_open(struct inode *inode, struct file *filp)
+{
+
+       mutex_lock(&vpu_data.lock);
+
+       if (open_count++ == 0) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+               if (!IS_ERR(vpu_regulator))
+                       regulator_enable(vpu_regulator);
+#else
+               pm_runtime_get_sync(&vpu_pdev->dev);
+               imx_gpc_power_up_pu(true);
+#endif
+
+#ifdef CONFIG_SOC_IMX6Q
+               /* A non-zero program counter means the codec was left
+                * running/powered from a previous session. */
+               clk_prepare(vpu_clk);
+               clk_enable(vpu_clk);
+               if (READ_REG(BIT_CUR_PC))
+                       pr_debug("Not power off before vpu open!\n");
+               clk_disable(vpu_clk);
+               clk_unprepare(vpu_clk);
+#endif
+       }
+
+       filp->private_data = (void *)(&vpu_data);
+       mutex_unlock(&vpu_data.lock);
+       return 0;
+}
+
+/*!
+ * @brief IO ctrl function for vpu file operation
+ * @param cmd IO ctrl command
+ * @return  0 on success or negative error code on error
+ */
+static long vpu_ioctl(struct file *filp, u_int cmd,
+                    u_long arg)
+{
+       int ret = 0;
+
+       switch (cmd) {
+       case VPU_IOC_PHYMEM_ALLOC:
+               {
+                       struct memalloc_record *rec;
+
+                       rec = kzalloc(sizeof(*rec), GFP_KERNEL);
+                       if (!rec)
+                               return -ENOMEM;
+
+                       ret = copy_from_user(&(rec->mem),
+                                            (struct vpu_mem_desc *)arg,
+                                            sizeof(struct vpu_mem_desc));
+                       if (ret) {
+                               kfree(rec);
+                               return -EFAULT;
+                       }
+
+                       pr_debug("[ALLOC] mem alloc size = 0x%x\n",
+                                rec->mem.size);
+
+                       ret = vpu_alloc_dma_buffer(&(rec->mem));
+                       if (ret == -1) {
+                               kfree(rec);
+                               printk(KERN_ERR
+                                      "Physical memory allocation error!\n");
+                               break;
+                       }
+                       ret = copy_to_user((void __user *)arg, &(rec->mem),
+                                          sizeof(struct vpu_mem_desc));
+                       if (ret) {
+                               kfree(rec);
+                               ret = -EFAULT;
+                               break;
+                       }
+
+                       mutex_lock(&vpu_data.lock);
+                       list_add(&rec->list, &head);
+                       mutex_unlock(&vpu_data.lock);
+
+                       break;
+               }
+       case VPU_IOC_PHYMEM_FREE:
+               {
+                       struct memalloc_record *rec, *n;
+                       struct vpu_mem_desc vpu_mem;
+
+                       ret = copy_from_user(&vpu_mem,
+                                            (struct vpu_mem_desc *)arg,
+                                            sizeof(struct vpu_mem_desc));
+                       if (ret)
+                               return -EACCES;
+
+                       pr_debug("[FREE] mem freed cpu_addr = 0x%x\n",
+                                vpu_mem.cpu_addr);
+                       if ((void *)vpu_mem.cpu_addr != NULL)
+                               vpu_free_dma_buffer(&vpu_mem);
+
+                       mutex_lock(&vpu_data.lock);
+                       list_for_each_entry_safe(rec, n, &head, list) {
+                               if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
+                                       /* delete from list */
+                                       list_del(&rec->list);
+                                       kfree(rec);
+                                       break;
+                               }
+                       }
+                       mutex_unlock(&vpu_data.lock);
+
+                       break;
+               }
+       case VPU_IOC_WAIT4INT:
+               {
+                       u_long timeout = (u_long) arg;
+                       if (!wait_event_interruptible_timeout
+                           (vpu_queue, irq_status != 0,
+                            msecs_to_jiffies(timeout))) {
+                               printk(KERN_WARNING "VPU blocking: timeout.\n");
+                               ret = -ETIME;
+                       } else if (signal_pending(current)) {
+                               printk(KERN_WARNING
+                                      "VPU interrupt received.\n");
+                               ret = -ERESTARTSYS;
+                       } else
+                               irq_status = 0;
+                       break;
+               }
+       case VPU_IOC_IRAM_SETTING:
+               {
+                       ret = copy_to_user((void __user *)arg, &iram,
+                                          sizeof(struct iram_setting));
+                       if (ret)
+                               ret = -EFAULT;
+
+                       break;
+               }
+       case VPU_IOC_CLKGATE_SETTING:
+               {
+                       u32 clkgate_en;
+
+                       if (get_user(clkgate_en, (u32 __user *) arg))
+                               return -EFAULT;
+
+                       if (clkgate_en) {
+                               clk_prepare(vpu_clk);
+                               clk_enable(vpu_clk);
+                               atomic_inc(&clk_cnt_from_ioc);
+                       } else {
+                               clk_disable(vpu_clk);
+                               clk_unprepare(vpu_clk);
+                               atomic_dec(&clk_cnt_from_ioc);
+                       }
+
+                       break;
+               }
+       case VPU_IOC_GET_SHARE_MEM:
+               {
+                       mutex_lock(&vpu_data.lock);
+                       if (share_mem.cpu_addr != 0) {
+                               ret = copy_to_user((void __user *)arg,
+                                                  &share_mem,
+                                                  sizeof(struct vpu_mem_desc));
+                               mutex_unlock(&vpu_data.lock);
+                               break;
+                       } else {
+                               if (copy_from_user(&share_mem,
+                                                  (struct vpu_mem_desc *)arg,
+                                                sizeof(struct vpu_mem_desc))) {
+                                       mutex_unlock(&vpu_data.lock);
+                                       return -EFAULT;
+                               }
+                               if (vpu_alloc_dma_buffer(&share_mem) == -1)
+                                       ret = -EFAULT;
+                               else {
+                                       if (copy_to_user((void __user *)arg,
+                                                        &share_mem,
+                                                        sizeof(struct
+                                                               vpu_mem_desc)))
+                                               ret = -EFAULT;
+                               }
+                       }
+                       mutex_unlock(&vpu_data.lock);
+                       break;
+               }
+       case VPU_IOC_REQ_VSHARE_MEM:
+               {
+                       mutex_lock(&vpu_data.lock);
+                       if (vshare_mem.cpu_addr != 0) {
+                               ret = copy_to_user((void __user *)arg,
+                                                  &vshare_mem,
+                                                  sizeof(struct vpu_mem_desc));
+                               mutex_unlock(&vpu_data.lock);
+                               break;
+                       } else {
+                               if (copy_from_user(&vshare_mem,
+                                                  (struct vpu_mem_desc *)arg,
+                                                  sizeof(struct
+                                                         vpu_mem_desc))) {
+                                       mutex_unlock(&vpu_data.lock);
+                                       return -EFAULT;
+                               }
+                               /* vmalloc shared memory if not allocated */
+                               if (!vshare_mem.cpu_addr)
+                                       vshare_mem.cpu_addr =
+                                           (unsigned long)
+                                           vmalloc_user(vshare_mem.size);
+                               if (copy_to_user
+                                    ((void __user *)arg, &vshare_mem,
+                                    sizeof(struct vpu_mem_desc)))
+                                       ret = -EFAULT;
+                       }
+                       mutex_unlock(&vpu_data.lock);
+                       break;
+               }
+       case VPU_IOC_GET_WORK_ADDR:
+               {
+                       if (bitwork_mem.cpu_addr != 0) {
+                               ret =
+                                   copy_to_user((void __user *)arg,
+                                                &bitwork_mem,
+                                                sizeof(struct vpu_mem_desc));
+                               break;
+                       } else {
+                               if (copy_from_user(&bitwork_mem,
+                                                  (struct vpu_mem_desc *)arg,
+                                                  sizeof(struct vpu_mem_desc)))
+                                       return -EFAULT;
+
+                               if (vpu_alloc_dma_buffer(&bitwork_mem) == -1)
+                                       ret = -EFAULT;
+                               else if (copy_to_user((void __user *)arg,
+                                                     &bitwork_mem,
+                                                     sizeof(struct
+                                                            vpu_mem_desc)))
+                                       ret = -EFAULT;
+                       }
+                       break;
+               }
+       /*
+        * The following two ioctl is used when user allocates working buffer
+        * and register it to vpu driver.
+        */
+       case VPU_IOC_QUERY_BITWORK_MEM:
+               {
+                       if (copy_to_user((void __user *)arg,
+                                        &bitwork_mem,
+                                        sizeof(struct vpu_mem_desc)))
+                               ret = -EFAULT;
+                       break;
+               }
+       case VPU_IOC_SET_BITWORK_MEM:
+               {
+                       if (copy_from_user(&bitwork_mem,
+                                          (struct vpu_mem_desc *)arg,
+                                          sizeof(struct vpu_mem_desc)))
+                               ret = -EFAULT;
+                       break;
+               }
+       case VPU_IOC_SYS_SW_RESET:
+               {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+                       imx_src_reset_vpu();
+#else
+                       if (vpu_plat->reset)
+                               vpu_plat->reset();
+#endif
+
+                       break;
+               }
+       case VPU_IOC_REG_DUMP:
+               break;
+       case VPU_IOC_PHYMEM_DUMP:
+               break;
+       case VPU_IOC_PHYMEM_CHECK:
+       {
+               struct vpu_mem_desc check_memory;
+               ret = copy_from_user(&check_memory,
+                                    (void __user *)arg,
+                                    sizeof(struct vpu_mem_desc));
+               if (ret != 0) {
+                       printk(KERN_ERR "copy from user failure:%d\n", ret);
+                       ret = -EFAULT;
+                       break;
+               }
+               ret = vpu_is_valid_phy_memory((u32)check_memory.phy_addr);
+
+               pr_debug("vpu: memory phy:0x%x %s phy memory\n",
+                      check_memory.phy_addr, (ret ? "is" : "isn't"));
+               /* borrow .size to pass back the result. */
+               check_memory.size = ret;
+               ret = copy_to_user((void __user *)arg, &check_memory,
+                                  sizeof(struct vpu_mem_desc));
+               if (ret) {
+                       ret = -EFAULT;
+                       break;
+               }
+               break;
+       }
+       case VPU_IOC_LOCK_DEV:
+               {
+                       u32 lock_en;
+
+                       if (get_user(lock_en, (u32 __user *) arg))
+                               return -EFAULT;
+
+                       if (lock_en)
+                               mutex_lock(&vpu_data.lock);
+                       else
+                               mutex_unlock(&vpu_data.lock);
+
+                       break;
+               }
+       default:
+               {
+                       printk(KERN_ERR "No such IOCTL, cmd is %d\n", cmd);
+                       ret = -EINVAL;
+                       break;
+               }
+       }
+       return ret;
+}
+
+/*!
+ * @brief Release function for vpu file operation
+ *
+ * On the last close: waits for the codec to go idle, drains pending
+ * interrupt work, frees every tracked buffer and the shared areas,
+ * rebalances clock enables left over from VPU_IOC_CLKGATE_SETTING,
+ * and powers the VPU domain down.
+ *
+ * @return  0 on success or negative error code on error
+ */
+static int vpu_release(struct inode *inode, struct file *filp)
+{
+       int i;
+       unsigned long timeout;
+
+       mutex_lock(&vpu_data.lock);
+
+       if (open_count > 0 && !(--open_count)) {
+
+               /* Wait for vpu go to idle state */
+               clk_prepare(vpu_clk);
+               clk_enable(vpu_clk);
+               if (READ_REG(BIT_CUR_PC)) {
+
+                       /* Poll the busy flag for at most one second.
+                        * NOTE(review): on timeout the code only warns and
+                        * carries on with the shutdown. */
+                       timeout = jiffies + HZ;
+                       while (READ_REG(BIT_BUSY_FLAG)) {
+                               msleep(1);
+                               if (time_after(jiffies, timeout)) {
+                                       printk(KERN_WARNING "VPU timeout during release\n");
+                                       break;
+                               }
+                       }
+                       clk_disable(vpu_clk);
+                       clk_unprepare(vpu_clk);
+
+                       /* Clean up interrupt */
+                       cancel_work_sync(&vpu_data.work);
+                       flush_workqueue(vpu_data.workqueue);
+                       irq_status = 0;
+
+                       clk_prepare(vpu_clk);
+                       clk_enable(vpu_clk);
+                       if (READ_REG(BIT_BUSY_FLAG)) {
+
+                               /* i.MX51/53 have no way to force the codec
+                                * to stop; refuse to power off while busy. */
+                               if (cpu_is_mx51() || cpu_is_mx53()) {
+                                       printk(KERN_ERR
+                                               "fatal error: can't gate/power off when VPU is busy\n");
+                                       clk_disable(vpu_clk);
+                                       clk_unprepare(vpu_clk);
+                                       mutex_unlock(&vpu_data.lock);
+                                       return -EFAULT;
+                               }
+
+#ifdef CONFIG_SOC_IMX6Q
+                               /*
+                                * NOTE(review): 0x10F0/0x10F4 look like a
+                                * firmware mailbox used to request a halt,
+                                * with 0x77 as the acknowledge value —
+                                * confirm against the VPU firmware docs.
+                                */
+                               if (cpu_is_mx6dl() || cpu_is_mx6q()) {
+                                       WRITE_REG(0x11, 0x10F0);
+                                       timeout = jiffies + HZ;
+                                       while (READ_REG(0x10F4) != 0x77) {
+                                               msleep(1);
+                                               if (time_after(jiffies, timeout))
+                                                       break;
+                                       }
+
+                                       if (READ_REG(0x10F4) != 0x77) {
+                                               printk(KERN_ERR
+                                                       "fatal error: can't gate/power off when VPU is busy\n");
+                                               WRITE_REG(0x0, 0x10F0);
+                                               clk_disable(vpu_clk);
+                                               clk_unprepare(vpu_clk);
+                                               mutex_unlock(&vpu_data.lock);
+                                               return -EFAULT;
+                                       } else {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+                                               imx_src_reset_vpu();
+#else
+                                               if (vpu_plat->reset)
+                                                       vpu_plat->reset();
+#endif
+                                       }
+                               }
+#endif
+                       }
+               }
+               clk_disable(vpu_clk);
+               clk_unprepare(vpu_clk);
+
+               vpu_free_buffers();
+
+               /* Free shared memory when vpu device is idle */
+               vpu_free_dma_buffer(&share_mem);
+               share_mem.cpu_addr = 0;
+               vfree((void *)vshare_mem.cpu_addr);
+               vshare_mem.cpu_addr = 0;
+
+               /* Undo clock enables user space requested via
+                * VPU_IOC_CLKGATE_SETTING but never released. */
+               vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
+               for (i = 0; i < vpu_clk_usercount; i++) {
+                       clk_disable(vpu_clk);
+                       clk_unprepare(vpu_clk);
+                       atomic_dec(&clk_cnt_from_ioc);
+               }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+               if (!IS_ERR(vpu_regulator))
+                       regulator_disable(vpu_regulator);
+#else
+               imx_gpc_power_up_pu(false);
+               pm_runtime_put_sync_suspend(&vpu_pdev->dev);
+#endif
+
+       }
+       mutex_unlock(&vpu_data.lock);
+
+       return 0;
+}
+
+/*!
+ * @brief fasync function for vpu file operation
+ *
+ * Registers or removes this file on the SIGIO notification queue that
+ * vpu_worker_callback() kicks when the codec raises an interrupt.
+ *
+ * @return  0 on success or negative error code on error
+ */
+static int vpu_fasync(int fd, struct file *filp, int mode)
+{
+       struct vpu_priv *dev = filp->private_data;
+
+       return fasync_helper(fd, filp, mode, &dev->async_queue);
+}
+
+/*!
+ * @brief memory map function of harware registers for vpu file operation
+ *
+ * Maps the physical VPU register window (phy_vpu_base_addr) into the
+ * caller's address space, uncached and non-executable.
+ * NOTE(review): VM_RESERVED was removed from mainline in 3.7; on 3.10
+ * this presumably builds against a compat define — verify.
+ *
+ * @return  0 on success or negative error code on error
+ */
+static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
+{
+       unsigned long pfn;
+
+       vm->vm_flags |= VM_IO | VM_RESERVED;
+       /*
+        * Since vpu registers have been mapped with ioremap() at probe
+        * which L_PTE_XN is 1, and the same physical address must be
+        * mapped multiple times with same type, so set L_PTE_XN to 1 here.
+        * Otherwise, there may be unexpected result in video codec.
+        */
+       vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
+       pfn = phy_vpu_base_addr >> PAGE_SHIFT;
+       pr_debug("size=0x%x,  page no.=0x%x\n",
+                (int)(vm->vm_end - vm->vm_start), (int)pfn);
+       return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end - vm->vm_start,
+                              vm->vm_page_prot) ? -EAGAIN : 0;
+}
+
+/*!
+ * @brief memory map function of memory for vpu file operation
+ *
+ * Maps a previously allocated DMA buffer (identified by its pfn in
+ * vm_pgoff) into the caller's address space, write-combined.
+ *
+ * @return  0 on success or negative error code on error
+ */
+static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
+{
+       int request_size = vm->vm_end - vm->vm_start;
+
+       pr_debug(" start=0x%x, pgoff=0x%x, size=0x%x\n",
+                (unsigned int)(vm->vm_start), (unsigned int)(vm->vm_pgoff),
+                request_size);
+
+       vm->vm_flags |= VM_IO | VM_RESERVED;
+       vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
+
+       if (remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff, request_size,
+                           vm->vm_page_prot))
+               return -EAGAIN;
+
+       return 0;
+}
+
+/* !
+ * @brief memory map function of vmalloced share memory
+ *
+ * Maps the vmalloc_user()-backed shared area (allocated by
+ * VPU_IOC_REQ_VSHARE_MEM) into the caller's address space.
+ *
+ * @return  0 on success or negative error code on error
+ */
+static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
+{
+       int ret;
+
+       /* vm_pgoff carries the kernel virtual address of the vmalloc area
+        * (cpu_addr >> PAGE_SHIFT, see vpu_mmap). */
+       ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
+       vm->vm_flags |= VM_IO;
+
+       return ret;
+}
+/*!
+ * @brief memory map interface for vpu file operation
+ *
+ * Dispatches by vm_pgoff: 0 maps the register window, the vshare page
+ * offset maps the vmalloc'ed shared area, anything else is a DMA buffer.
+ *
+ * @return  0 on success or negative error code on error
+ */
+static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
+{
+       unsigned long vshare_pgoff = vshare_mem.cpu_addr >> PAGE_SHIFT;
+
+       if (!vm->vm_pgoff)
+               return vpu_map_hwregs(fp, vm);
+       if (vm->vm_pgoff == vshare_pgoff)
+               return vpu_map_vshare_mem(fp, vm);
+       return vpu_map_dma_mem(fp, vm);
+}
+
+/* Character-device entry points for /dev/mxc_vpu. */
+const struct file_operations vpu_fops = {
+       .owner = THIS_MODULE,
+       .open = vpu_open,
+       .unlocked_ioctl = vpu_ioctl,
+       .release = vpu_release,
+       .fasync = vpu_fasync,
+       .mmap = vpu_mmap,
+};
+
+/*!
+ * This function is called by the driver framework to initialize the vpu device.
+ * @param   pdev The platform device structure for the vpu passed in by the framework.
+ * @return   0 on success or negative error code on error
+ */
+static int vpu_dev_probe(struct platform_device *pdev)
+{
+       int err = 0;
+       struct device *temp_class;
+       struct resource *res;
+       unsigned long addr = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+       struct device_node *np = pdev->dev.of_node;
+       u32 iramsize;
+
+       /* Optional on-chip IRAM for VPU working buffers (DT "iramsize"). */
+       err = of_property_read_u32(np, "iramsize", (u32 *)&iramsize);
+       if (!err && iramsize)
+               iram_alloc(iramsize, &addr);
+       if (addr == 0)
+               iram.start = iram.end = 0;
+       else {
+               iram.start = addr;
+               iram.end = addr + iramsize - 1;
+       }
+
+       vpu_pdev = pdev;
+#else
+
+       vpu_plat = pdev->dev.platform_data;
+
+       if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
+               iram_alloc(vpu_plat->iram_size, &addr);
+       if (addr == 0)
+               iram.start = iram.end = 0;
+       else {
+               iram.start = addr;
+               iram.end = addr +  vpu_plat->iram_size - 1;
+       }
+#endif
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
+       if (!res) {
+               printk(KERN_ERR "vpu: unable to get vpu base addr\n");
+               return -ENODEV;
+       }
+       phy_vpu_base_addr = res->start;
+       /* Resource ranges are inclusive, so the size is end - start + 1;
+        * the previous "end - start" mapped one byte short.  Also fail
+        * cleanly if the mapping itself fails instead of dereferencing
+        * a NULL iomem pointer later. */
+       vpu_base = ioremap(res->start, res->end - res->start + 1);
+       if (!vpu_base) {
+               printk(KERN_ERR "vpu: unable to map vpu registers\n");
+               return -ENOMEM;
+       }
+
+       /* 0 requests a dynamically allocated major number. */
+       vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
+       if (vpu_major < 0) {
+               printk(KERN_ERR "vpu: unable to get a major for VPU\n");
+               err = -EBUSY;
+               goto error;
+       }
+
+       vpu_class = class_create(THIS_MODULE, "mxc_vpu");
+       if (IS_ERR(vpu_class)) {
+               err = PTR_ERR(vpu_class);
+               goto err_out_chrdev;
+       }
+
+       temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
+                                  NULL, "mxc_vpu");
+       if (IS_ERR(temp_class)) {
+               err = PTR_ERR(temp_class);
+               goto err_out_class;
+       }
+
+       vpu_clk = clk_get(&pdev->dev, "vpu_clk");
+       if (IS_ERR(vpu_clk)) {
+               err = -ENOENT;
+               goto err_out_class;
+       }
+
+       vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
+       if (vpu_ipi_irq < 0) {
+               printk(KERN_ERR "vpu: unable to get vpu interrupt\n");
+               err = -ENXIO;
+               goto err_out_class;
+       }
+       err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
+                         (void *)(&vpu_data));
+       if (err)
+               goto err_out_class;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+       vpu_regulator = regulator_get(NULL, "cpu_vddvpu");
+       if (IS_ERR(vpu_regulator)) {
+               if (!(cpu_is_mx51() || cpu_is_mx53())) {
+                       printk(KERN_ERR
+                               "%s: failed to get vpu regulator\n", __func__);
+                       goto err_out_class;
+               } else {
+                       /* regulator_get will return error on MX5x,
+                        * just igore it everywhere*/
+                       printk(KERN_WARNING
+                               "%s: failed to get vpu regulator\n", __func__);
+               }
+       }
+#endif
+
+#ifdef MXC_VPU_HAS_JPU
+       vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
+       if (vpu_jpu_irq < 0) {
+               printk(KERN_ERR "vpu: unable to get vpu jpu interrupt\n");
+               err = -ENXIO;
+               free_irq(vpu_ipi_irq, &vpu_data);
+               goto err_out_class;
+       }
+       err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
+                         "VPU_JPG_IRQ", (void *)(&vpu_data));
+       if (err) {
+               free_irq(vpu_ipi_irq, &vpu_data);
+               goto err_out_class;
+       }
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+       pm_runtime_enable(&pdev->dev);
+#endif
+
+       vpu_data.workqueue = create_workqueue("vpu_wq");
+       INIT_WORK(&vpu_data.work, vpu_worker_callback);
+       mutex_init(&vpu_data.lock);
+       printk(KERN_INFO "VPU initialized\n");
+       goto out;
+
+err_out_class:
+       device_destroy(vpu_class, MKDEV(vpu_major, 0));
+       class_destroy(vpu_class);
+err_out_chrdev:
+       unregister_chrdev(vpu_major, "mxc_vpu");
+error:
+       iounmap(vpu_base);
+out:
+       return err;
+}
+
+/*!
+ * Platform driver remove hook: release the IRQs requested in probe, drain
+ * and destroy the worker queue, unmap the register window, and return any
+ * IRAM / regulator resources that probe acquired.
+ * @return  0 always
+ */
+static int vpu_dev_remove(struct platform_device *pdev)
+{
+       free_irq(vpu_ipi_irq, &vpu_data);
+#ifdef MXC_VPU_HAS_JPU
+       free_irq(vpu_jpu_irq, &vpu_data);
+#endif
+       /* Stop the bottom half before tearing the workqueue down. */
+       cancel_work_sync(&vpu_data.work);
+       flush_workqueue(vpu_data.workqueue);
+       destroy_workqueue(vpu_data.workqueue);
+
+       iounmap(vpu_base);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+       if (iram.start)
+               iram_free(iram.start, iram.end-iram.start+1);
+#else
+       if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
+               iram_free(iram.start,  vpu_plat->iram_size);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+       if (!IS_ERR(vpu_regulator))
+               regulator_put(vpu_regulator);
+#endif
+       return 0;
+}
+
+#ifdef CONFIG_PM
+/*!
+ * System suspend hook.
+ *
+ * With no open instance, only gates power on MX51.  Otherwise: waits up
+ * to ~1s for the BIT processor to go idle, drops every clock reference
+ * taken through the ioctl interface, saves the BIT context registers
+ * (skipped on MX53, which apparently keeps VPU power across suspend —
+ * NOTE(review): confirm against the MX53 power design), then removes
+ * power from the VPU domain.
+ * @return  0 on success, -EAGAIN if the VPU would not go idle in time
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+static int vpu_suspend(struct device *dev)
+#else
+static int vpu_suspend(struct platform_device *pdev, pm_message_t state)
+#endif
+{
+       int i;
+       unsigned long timeout;
+
+       mutex_lock(&vpu_data.lock);
+       if (open_count == 0) {
+               /* VPU is released (all instances are freed),
+                * clock is already off, context is no longer needed,
+                * power is already off on MX6,
+                * gate power on MX51 */
+               if (cpu_is_mx51()) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+                       if (vpu_plat->pg)
+                               vpu_plat->pg(1);
+#endif
+               }
+       } else {
+               /* Wait for vpu go to idle state, suspect vpu cannot be changed
+                  to idle state after about 1 sec */
+               timeout = jiffies + HZ;
+               clk_prepare(vpu_clk);
+               clk_enable(vpu_clk);
+               while (READ_REG(BIT_BUSY_FLAG)) {
+                       msleep(1);
+                       if (time_after(jiffies, timeout)) {
+                               clk_disable(vpu_clk);
+                               clk_unprepare(vpu_clk);
+                               mutex_unlock(&vpu_data.lock);
+                               return -EAGAIN;
+                       }
+               }
+               clk_disable(vpu_clk);
+               clk_unprepare(vpu_clk);
+
+               /* Make sure clock is disabled before suspend */
+               vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
+               for (i = 0; i < vpu_clk_usercount; i++) {
+                       clk_disable(vpu_clk);
+                       clk_unprepare(vpu_clk);
+               }
+
+               if (cpu_is_mx53()) {
+                       mutex_unlock(&vpu_data.lock);
+                       return 0;
+               }
+
+               if (bitwork_mem.cpu_addr != 0) {
+                       clk_prepare(vpu_clk);
+                       clk_enable(vpu_clk);
+                       /* Save 64 registers from BIT_CODE_BUF_ADDR */
+                       for (i = 0; i < 64; i++)
+                               regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
+                       pc_before_suspend = READ_REG(BIT_CUR_PC);
+                       clk_disable(vpu_clk);
+                       clk_unprepare(vpu_clk);
+               }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+               if (vpu_plat->pg)
+                       vpu_plat->pg(1);
+#endif
+
+               /* If VPU is working before suspend, disable
+                * regulator to make usecount right. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+               if (!IS_ERR(vpu_regulator))
+                       regulator_disable(vpu_regulator);
+#else
+               imx_gpc_power_up_pu(false);
+#endif
+       }
+
+       mutex_unlock(&vpu_data.lock);
+       return 0;
+}
+
+/*!
+ * System resume hook: mirror of vpu_suspend.
+ *
+ * With no open instance, only un-gates power on MX51.  Otherwise restores
+ * VPU power, restores the 64 saved BIT context registers, re-downloads
+ * the 4 KiB BIT boot code from the external-RAM code buffer, restarts the
+ * BIT processor if it was running before suspend, and finally re-takes
+ * the clock references counted by vpu_clk_usercount during suspend.
+ * @return  0 always
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+static int vpu_resume(struct device *dev)
+#else
+static int vpu_resume(struct platform_device *pdev)
+#endif
+{
+       int i;
+
+       mutex_lock(&vpu_data.lock);
+       if (open_count == 0) {
+               /* VPU is released (all instances are freed),
+                * clock should be kept off, context is no longer needed,
+                * power should be kept off on MX6,
+                * disable power gating on MX51 */
+               if (cpu_is_mx51()) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+                       if (vpu_plat->pg)
+                               vpu_plat->pg(0);
+#endif
+               }
+       } else {
+               if (cpu_is_mx53())
+                       goto recover_clk;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+               /* If VPU is working before suspend, enable
+                * regulator to make usecount right. */
+               if (!IS_ERR(vpu_regulator))
+                       regulator_enable(vpu_regulator);
+
+               if (vpu_plat->pg)
+                       vpu_plat->pg(0);
+#else
+               imx_gpc_power_up_pu(true);
+#endif
+
+               if (bitwork_mem.cpu_addr != 0) {
+                       u32 *p = (u32 *) bitwork_mem.cpu_addr;
+                       u32 data, pc;
+                       u16 data_hi;
+                       u16 data_lo;
+
+                       clk_prepare(vpu_clk);
+                       clk_enable(vpu_clk);
+
+                       /* A non-zero PC means power was never removed, so
+                        * the saved context must not be replayed. */
+                       pc = READ_REG(BIT_CUR_PC);
+                       if (pc) {
+                               printk(KERN_WARNING "Not power off after suspend (PC=0x%x)\n", pc);
+                               clk_disable(vpu_clk);
+                               clk_unprepare(vpu_clk);
+                               goto recover_clk;
+                       }
+
+                       /* Restore registers */
+                       for (i = 0; i < 64; i++)
+                               WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));
+
+                       WRITE_REG(0x0, BIT_RESET_CTRL);
+                       WRITE_REG(0x0, BIT_CODE_RUN);
+                       /* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
+#ifdef CONFIG_SOC_IMX6Q
+                       WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
+#endif
+
+                       /*
+                        * Re-load boot code, from the codebuffer in external RAM.
+                        * Thankfully, we only need 4096 bytes, same for all platforms.
+                        */
+                       for (i = 0; i < 2048; i += 4) {
+                               data = p[(i / 2) + 1];
+                               data_hi = (data >> 16) & 0xFFFF;
+                               data_lo = data & 0xFFFF;
+                               WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
+                               WRITE_REG(((i + 1) << 16) | data_lo,
+                                               BIT_CODE_DOWN);
+
+                               data = p[i / 2];
+                               data_hi = (data >> 16) & 0xFFFF;
+                               data_lo = data & 0xFFFF;
+                               WRITE_REG(((i + 2) << 16) | data_hi,
+                                               BIT_CODE_DOWN);
+                               WRITE_REG(((i + 3) << 16) | data_lo,
+                                               BIT_CODE_DOWN);
+                       }
+
+                       if (pc_before_suspend) {
+                               WRITE_REG(0x1, BIT_BUSY_FLAG);
+                               WRITE_REG(0x1, BIT_CODE_RUN);
+                               /* Busy-wait for the BIT processor to come up;
+                                * no timeout here — mirrors the boot path. */
+                               while (READ_REG(BIT_BUSY_FLAG))
+                                       ;
+                       } else {
+                               printk(KERN_WARNING "PC=0 before suspend\n");
+                       }
+                       clk_disable(vpu_clk);
+                       clk_unprepare(vpu_clk);
+               }
+
+recover_clk:
+               /* Recover vpu clock */
+               for (i = 0; i < vpu_clk_usercount; i++) {
+                       clk_prepare(vpu_clk);
+                       clk_enable(vpu_clk);
+               }
+       }
+
+       mutex_unlock(&vpu_data.lock);
+       return 0;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+/* Runtime PM: hold the high bus-frequency setpoint only while the VPU
+ * is runtime-active. */
+static int vpu_runtime_suspend(struct device *dev)
+{
+       release_bus_freq(BUS_FREQ_HIGH);
+       return 0;
+}
+
+static int vpu_runtime_resume(struct device *dev)
+{
+       request_bus_freq(BUS_FREQ_HIGH);
+       return 0;
+}
+
+/* dev_pm_ops used on kernels >= 3.5 (attached via driver.pm). */
+static const struct dev_pm_ops vpu_pm_ops = {
+       SET_RUNTIME_PM_OPS(vpu_runtime_suspend, vpu_runtime_resume, NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(vpu_suspend, vpu_resume)
+};
+#endif
+
+#else
+/* Without CONFIG_PM the legacy (<3.5) hooks compile out to NULL. */
+#define        vpu_suspend     NULL
+#define        vpu_resume      NULL
+#endif                         /* !CONFIG_PM */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+/* Device-tree match table (MX6-era kernels probe the VPU via DT). */
+static const struct of_device_id vpu_of_match[] = {
+       { .compatible = "fsl,imx6-vpu", },
+       {/* sentinel */}
+};
+MODULE_DEVICE_TABLE(of, vpu_of_match);
+#endif
+
+/*! Driver definition
+ *
+ * Kernels >= 3.5 attach power management through dev_pm_ops and match via
+ * the OF table; older kernels use the legacy platform suspend/resume
+ * callbacks and platform-data matching by name.
+ */
+static struct platform_driver mxcvpu_driver = {
+       .driver = {
+                  .name = "mxc_vpu",
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+                  .of_match_table = vpu_of_match,
+#ifdef CONFIG_PM
+                  .pm = &vpu_pm_ops,
+#endif
+#endif
+                  },
+       .probe = vpu_dev_probe,
+       .remove = vpu_dev_remove,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+       .suspend = vpu_suspend,
+       .resume = vpu_resume,
+#endif
+};
+
+/*!
+ * Module init: prepare module-global state, then register the driver.
+ * @return  0 on success or the platform_driver_register() error code
+ */
+static int __init vpu_init(void)
+{
+       int ret;
+
+       /* Initialize the wait queue (and the <3.5 memblock bookkeeping)
+        * BEFORE registering the driver: probe — and with it open()/IRQ
+        * delivery that wakes vpu_queue — can run as soon as
+        * platform_driver_register() returns.  The original order left a
+        * window where an uninitialized wait queue could be used. */
+       init_waitqueue_head(&vpu_queue);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+       memblock_analyze();
+       top_address_DRAM = memblock_end_of_DRAM_with_reserved();
+#endif
+
+       ret = platform_driver_register(&mxcvpu_driver);
+
+       return ret;
+}
+
+/*!
+ * Module exit: tear down the char device/class, free the long-lived DMA
+ * buffers, pulse the VPU through a hardware reset (briefly powering and
+ * clocking it for that purpose), then drop the clock and unregister the
+ * platform driver.
+ */
+static void __exit vpu_exit(void)
+{
+       if (vpu_major > 0) {
+               device_destroy(vpu_class, MKDEV(vpu_major, 0));
+               class_destroy(vpu_class);
+               unregister_chrdev(vpu_major, "mxc_vpu");
+               vpu_major = 0;
+       }
+
+       vpu_free_dma_buffer(&bitwork_mem);
+       vpu_free_dma_buffer(&pic_para_mem);
+       vpu_free_dma_buffer(&user_data_mem);
+
+       /* reset VPU state */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+       if (!IS_ERR(vpu_regulator))
+               regulator_enable(vpu_regulator);
+       clk_prepare(vpu_clk);
+       clk_enable(vpu_clk);
+       if (vpu_plat->reset)
+               vpu_plat->reset();
+       clk_disable(vpu_clk);
+       clk_unprepare(vpu_clk);
+       if (!IS_ERR(vpu_regulator))
+               regulator_disable(vpu_regulator);
+#else
+       imx_gpc_power_up_pu(true);
+       clk_prepare(vpu_clk);
+       clk_enable(vpu_clk);
+       imx_src_reset_vpu();
+       clk_disable(vpu_clk);
+       clk_unprepare(vpu_clk);
+       imx_gpc_power_up_pu(false);
+#endif
+
+       clk_put(vpu_clk);
+
+       platform_driver_unregister(&mxcvpu_driver);
+       return;
+}
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
+MODULE_LICENSE("GPL");
+
+module_init(vpu_init);
+module_exit(vpu_exit);
diff --git a/include/linux/mxc_vpu.h b/include/linux/mxc_vpu.h
new file mode 100644 (file)
index 0000000..7869d59
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU Lesser General
+ * Public License.  You may obtain a copy of the GNU Lesser General
+ * Public License Version 2.1 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/lgpl-license.html
+ * http://www.gnu.org/copyleft/lgpl.html
+ */
+
+/*!
+ * @defgroup VPU Video Processor Unit Driver
+ */
+
+/*!
+ * @file linux/mxc_vpu.h
+ *
+ * @brief VPU system initialization and file operation definition
+ *
+ * @ingroup VPU
+ */
+
+#ifndef __ASM_ARCH_MXC_VPU_H__
+#define __ASM_ARCH_MXC_VPU_H__
+
+#include <linux/fs.h>
+
+/* Board-level VPU configuration, used on pre-DT kernels (< 3.5). */
+struct mxc_vpu_platform_data {
+       bool iram_enable;       /* use on-chip IRAM for VPU buffers */
+       int  iram_size;         /* bytes of IRAM to reserve */
+       void (*reset) (void);   /* board hook: hard-reset the VPU block */
+       void (*pg) (int);       /* board hook: power gate (1 = gate, 0 = ungate) */
+};
+
+/* Descriptor for a DMA buffer shared between the driver and user space. */
+struct vpu_mem_desc {
+       u32 size;               /* buffer size in bytes */
+       dma_addr_t phy_addr;    /* physical/DMA address of the buffer */
+       u32 cpu_addr;           /* cpu address to free the dma mem */
+       u32 virt_uaddr;         /* virtual user space address */
+};
+
+#define VPU_IOC_MAGIC  'V'
+
+/* ioctl command numbers (codes 5 and 10 are unused). */
+#define VPU_IOC_PHYMEM_ALLOC   _IO(VPU_IOC_MAGIC, 0)
+#define VPU_IOC_PHYMEM_FREE    _IO(VPU_IOC_MAGIC, 1)
+#define VPU_IOC_WAIT4INT       _IO(VPU_IOC_MAGIC, 2)
+#define VPU_IOC_PHYMEM_DUMP    _IO(VPU_IOC_MAGIC, 3)
+#define VPU_IOC_REG_DUMP       _IO(VPU_IOC_MAGIC, 4)
+#define VPU_IOC_IRAM_SETTING   _IO(VPU_IOC_MAGIC, 6)
+#define VPU_IOC_CLKGATE_SETTING        _IO(VPU_IOC_MAGIC, 7)
+#define VPU_IOC_GET_WORK_ADDR   _IO(VPU_IOC_MAGIC, 8)
+#define VPU_IOC_REQ_VSHARE_MEM _IO(VPU_IOC_MAGIC, 9)
+#define VPU_IOC_SYS_SW_RESET   _IO(VPU_IOC_MAGIC, 11)
+#define VPU_IOC_GET_SHARE_MEM   _IO(VPU_IOC_MAGIC, 12)
+#define VPU_IOC_QUERY_BITWORK_MEM  _IO(VPU_IOC_MAGIC, 13)
+#define VPU_IOC_SET_BITWORK_MEM    _IO(VPU_IOC_MAGIC, 14)
+#define VPU_IOC_PHYMEM_CHECK   _IO(VPU_IOC_MAGIC, 15)
+#define VPU_IOC_LOCK_DEV       _IO(VPU_IOC_MAGIC, 16)
+
+/* BIT processor register offsets, relative to the mapped register base. */
+#define BIT_CODE_RUN                   0x000
+#define BIT_CODE_DOWN                  0x004
+#define BIT_INT_CLEAR                  0x00C
+#define BIT_INT_STATUS                 0x010
+#define BIT_CUR_PC                     0x018
+#define BIT_INT_REASON                 0x174
+
+#define MJPEG_PIC_STATUS_REG           0x3004
+#define MBC_SET_SUBBLK_EN              0x4A0
+
+/* Work/control buffer address registers (64 consecutive u32 slots). */
+#define BIT_WORK_CTRL_BUF_BASE         0x100
+#define BIT_WORK_CTRL_BUF_REG(i)       (BIT_WORK_CTRL_BUF_BASE + i * 4)
+#define BIT_CODE_BUF_ADDR              BIT_WORK_CTRL_BUF_REG(0)
+#define BIT_WORK_BUF_ADDR              BIT_WORK_CTRL_BUF_REG(1)
+#define BIT_PARA_BUF_ADDR              BIT_WORK_CTRL_BUF_REG(2)
+#define BIT_BIT_STREAM_CTRL            BIT_WORK_CTRL_BUF_REG(3)
+#define BIT_FRAME_MEM_CTRL             BIT_WORK_CTRL_BUF_REG(4)
+#define BIT_BIT_STREAM_PARAM           BIT_WORK_CTRL_BUF_REG(5)
+
+/* The reset-control register moved on the MX6 variant of the block. */
+#ifndef CONFIG_ARCH_MX6
+#define BIT_RESET_CTRL                 0x11C
+#else
+#define BIT_RESET_CTRL                 0x128
+#endif
+
+/* i could be 0, 1, 2, 3 */
+#define        BIT_RD_PTR_BASE                 0x120
+#define BIT_RD_PTR_REG(i)              (BIT_RD_PTR_BASE + i * 8)
+#define BIT_WR_PTR_REG(i)              (BIT_RD_PTR_BASE + i * 8 + 4)
+
+/* i could be 0, 1, 2, 3 */
+#define BIT_FRM_DIS_FLG_BASE           (cpu_is_mx51() ? 0x150 : 0x140)
+#define        BIT_FRM_DIS_FLG_REG(i)          (BIT_FRM_DIS_FLG_BASE + i * 4)
+
+#define BIT_BUSY_FLAG                  0x160
+#define BIT_RUN_COMMAND                        0x164
+#define BIT_INT_ENABLE                 0x170
+
+#define        BITVAL_PIC_RUN                  8
+
+/* Values written to the sleep/wake command registers. */
+#define        VPU_SLEEP_REG_VALUE             10
+#define        VPU_WAKE_REG_VALUE              11
+
+/* VL2CC (video L2 cache controller) interface, implemented elsewhere.
+ * The original header declared this exact set of prototypes twice;
+ * the redundant second copy has been removed. */
+int vl2cc_init(u32 vl2cc_hw_base);
+void vl2cc_enable(void);
+void vl2cc_flush(void);
+void vl2cc_disable(void);
+void vl2cc_cleanup(void);
+
+#endif