]> git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - drivers/mxc/vpu/mxc_vpu.c
KARO: cleanup after merge of Freescale 3.10.17 stuff
[karo-tx-linux.git] / drivers / mxc / vpu / mxc_vpu.c
index 9baeeb4e3a512dd1cc06d28e552aea0444676e3a..564682c64a558a4a8b757fdbb1ab93f0d4648b31 100644 (file)
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/version.h>
-#include <asm/page.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
 #include <linux/sizes.h>
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
-#include <linux/iram_alloc.h>
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/mxc_vpu.h>
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
 #include <linux/genalloc.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/reset.h>
 #include <linux/clk.h>
 #include <linux/mxc_vpu.h>
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
-#include <mach/busfreq.h>
-#include <mach/common.h>
-#else
-#include <asm/sizes.h>
-#endif
 
 /* Define one new pgprot which combined uncached and XN(never executable) */
 #define pgprot_noncachedxn(prot) \
@@ -82,6 +64,15 @@ struct vpu_priv {
        struct work_struct work;
        struct workqueue_struct *workqueue;
        struct mutex lock;
+       const struct mxc_vpu_soc_data *soc_data;
+       int clk_enabled;
+       struct list_head users;
+};
+
+struct vpu_user_data {
+       struct vpu_priv *vpu_data;
+       struct list_head list;
+       int clk_enable_cnt;
 };
 
 /* To track the allocated memory buffer */
@@ -95,36 +86,38 @@ struct iram_setting {
        u32 end;
 };
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+struct mxc_vpu_soc_data {
+       unsigned vpu_pwr_mgmnt:1,
+               regulator_required:1,
+               quirk_subblk_en:1,
+               is_mx51:1,
+               is_mx53:1,
+               is_mx6dl:1,
+               is_mx6q:1,
+               has_jpu:1;
+};
+
 static struct gen_pool *iram_pool;
 static u32 iram_base;
-#endif
 
-static LIST_HEAD(head);
+static LIST_HEAD(mem_list);
 
 static int vpu_major;
-static int vpu_clk_usercount;
 static struct class *vpu_class;
 static struct vpu_priv vpu_data;
 static u8 open_count;
 static struct clk *vpu_clk;
-static struct vpu_mem_desc bitwork_mem = { 0 };
-static struct vpu_mem_desc pic_para_mem = { 0 };
-static struct vpu_mem_desc user_data_mem = { 0 };
-static struct vpu_mem_desc share_mem = { 0 };
-static struct vpu_mem_desc vshare_mem = { 0 };
+static struct vpu_mem_desc bitwork_mem;
+static struct vpu_mem_desc pic_para_mem;
+static struct vpu_mem_desc user_data_mem;
+static struct vpu_mem_desc share_mem;
+static struct vpu_mem_desc vshare_mem;
 
 static void __iomem *vpu_base;
 static int vpu_ipi_irq;
 static u32 phy_vpu_base_addr;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-static phys_addr_t top_address_DRAM;
-static struct mxc_vpu_platform_data *vpu_plat;
-#endif
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
-static struct platform_device *vpu_pdev;
-#endif
+static struct device *vpu_dev;
 
 /* IRAM setting */
 static struct iram_setting iram;
@@ -134,73 +127,68 @@ static int irq_status;
 static int codec_done;
 static wait_queue_head_t vpu_queue;
 
-#ifdef CONFIG_SOC_IMX6Q
-#define MXC_VPU_HAS_JPU
-#endif
-
-#ifdef MXC_VPU_HAS_JPU
 static int vpu_jpu_irq;
-#endif
 
+#ifdef CONFIG_PM
 static unsigned int regBk[64];
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-static struct regulator *vpu_regulator;
-#endif
 static unsigned int pc_before_suspend;
-static atomic_t clk_cnt_from_ioc = ATOMIC_INIT(0);
+#endif
+static struct regulator *vpu_regulator;
 
-#define        READ_REG(x)             readl_relaxed(vpu_base + x)
-#define        WRITE_REG(val, x)       writel_relaxed(val, vpu_base + x)
+#define        READ_REG(x)             readl_relaxed(vpu_base + (x))
+#define        WRITE_REG(val, x)       writel_relaxed(val, vpu_base + (x))
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
-/* redirect to static functions */
-static int cpu_is_mx6dl(void)
+static int vpu_clk_enable(struct vpu_priv *vpu_data)
 {
-       int ret;
-       ret = of_machine_is_compatible("fsl,imx6dl");
-       return ret;
-}
+       int ret = 0;
+
+       if (vpu_data->clk_enabled++ == 0)
+               ret = clk_prepare_enable(vpu_clk);
+
+       if (WARN_ON(vpu_data->clk_enabled <= 0))
+               return -EINVAL;
 
-static int cpu_is_mx6q(void)
-{
-       int ret;
-       ret = of_machine_is_compatible("fsl,imx6q");
        return ret;
 }
-#endif
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
-static void imx_src_reset_vpu(void)
+static int vpu_clk_disable(struct vpu_priv *vpu_data)
 {
-       device_reset(&vpu_pdev->dev);
-}
+       if (WARN_ON(vpu_data->clk_enabled == 0))
+               return -EINVAL;
 
-static void imx_gpc_power_up_pu(int on)
-{
+       if (--vpu_data->clk_enabled == 0)
+               clk_disable_unprepare(vpu_clk);
+       return 0;
 }
 
-static void request_bus_freq(int freq)
+static inline int vpu_reset(void)
 {
+       return device_reset(vpu_dev);
 }
 
-static void release_bus_freq(int freq)
+static void vpu_power_up(void)
 {
-}
+       int ret;
 
-static int cpu_is_mx53(void)
-{
-       return 0;
+       if (IS_ERR(vpu_regulator))
+               return;
+
+       ret = regulator_enable(vpu_regulator);
+       if (ret)
+               dev_err(vpu_dev, "failed to power up vpu: %d\n", ret);
 }
 
-static int cpu_is_mx51(void)
+static void vpu_power_down(void)
 {
-       return 0;
-}
+       int ret;
 
-#define VM_RESERVED 0
-#define BUS_FREQ_HIGH 0
+       if (IS_ERR(vpu_regulator))
+               return;
 
-#endif
+       ret = regulator_disable(vpu_regulator);
+       if (ret)
+               dev_err(vpu_dev, "failed to power down vpu: %d\n", ret);
+}
 
 /*!
  * Private function to alloc dma buffer
@@ -208,14 +196,13 @@ static int cpu_is_mx51(void)
  */
 static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
 {
-       mem->cpu_addr = (unsigned long)
-           dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
-                              (dma_addr_t *) (&mem->phy_addr),
-                              GFP_DMA | GFP_KERNEL);
-       pr_debug("[ALLOC] mem alloc cpu_addr = 0x%x\n", mem->cpu_addr);
-       if ((void *)(mem->cpu_addr) == NULL) {
-               printk(KERN_ERR "Physical memory allocation error!\n");
-               return -1;
+       mem->cpu_addr = dma_alloc_coherent(vpu_dev, PAGE_ALIGN(mem->size),
+                                       &mem->phy_addr,
+                                       GFP_DMA | GFP_KERNEL);
+       dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = %p\n", mem->cpu_addr);
+       if (mem->cpu_addr == NULL) {
+               dev_err(vpu_dev, "Physical memory allocation error!\n");
+               return -ENOMEM;
        }
        return 0;
 }
@@ -225,10 +212,9 @@ static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
  */
 static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
 {
-       if (mem->cpu_addr != 0) {
-               dma_free_coherent(0, PAGE_ALIGN(mem->size),
-                                 (void *)mem->cpu_addr, mem->phy_addr);
-       }
+       if (mem->cpu_addr != NULL)
+               dma_free_coherent(vpu_dev, PAGE_ALIGN(mem->size),
+                               mem->cpu_addr, mem->phy_addr);
 }
 
 /*!
@@ -240,11 +226,11 @@ static int vpu_free_buffers(void)
        struct memalloc_record *rec, *n;
        struct vpu_mem_desc mem;
 
-       list_for_each_entry_safe(rec, n, &head, list) {
+       list_for_each_entry_safe(rec, n, &mem_list, list) {
                mem = rec->mem;
                if (mem.cpu_addr != 0) {
                        vpu_free_dma_buffer(&mem);
-                       pr_debug("[FREE] freed paddr=0x%08X\n", mem.phy_addr);
+                       dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
                        /* delete from list */
                        list_del(&rec->list);
                        kfree(rec);
@@ -256,8 +242,7 @@ static int vpu_free_buffers(void)
 
 static inline void vpu_worker_callback(struct work_struct *w)
 {
-       struct vpu_priv *dev = container_of(w, struct vpu_priv,
-                               work);
+       struct vpu_priv *dev = container_of(w, struct vpu_priv, work);
 
        if (dev->async_queue)
                kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
@@ -294,7 +279,6 @@ static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
 /*!
  * @brief vpu jpu interrupt handler
  */
-#ifdef MXC_VPU_HAS_JPU
 static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
 {
        struct vpu_priv *dev = dev_id;
@@ -308,24 +292,6 @@ static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
 
        return IRQ_HANDLED;
 }
-#endif
-
-/*!
- * @brief check phy memory prepare to pass to vpu is valid or not, we
- * already address some issue that if pass a wrong address to vpu
- * (like virtual address), system will hang.
- *
- * @return true return is a valid phy memory address, false return not.
- */
-bool vpu_is_valid_phy_memory(u32 paddr)
-{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-       if (paddr > top_address_DRAM)
-               return false;
-#endif
-
-       return true;
-}
 
 /*!
  * @brief open function for vpu file operation
@@ -334,29 +300,25 @@ bool vpu_is_valid_phy_memory(u32 paddr)
  */
 static int vpu_open(struct inode *inode, struct file *filp)
 {
+       struct vpu_user_data *user_data = devm_kzalloc(vpu_dev,
+                                               sizeof(*user_data),
+                                               GFP_KERNEL);
+       if (user_data == NULL)
+               return -ENOMEM;
+
+       user_data->vpu_data = &vpu_data;
+
+       INIT_LIST_HEAD(&user_data->list);
+       list_add(&user_data->list, &vpu_data.users);
 
        mutex_lock(&vpu_data.lock);
 
        if (open_count++ == 0) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-               if (!IS_ERR(vpu_regulator))
-                       regulator_enable(vpu_regulator);
-#else
-               pm_runtime_get_sync(&vpu_pdev->dev);
-               imx_gpc_power_up_pu(true);
-#endif
-
-#ifdef CONFIG_SOC_IMX6Q
-               clk_prepare(vpu_clk);
-               clk_enable(vpu_clk);
-               if (READ_REG(BIT_CUR_PC))
-                       pr_debug("Not power off before vpu open!\n");
-               clk_disable(vpu_clk);
-               clk_unprepare(vpu_clk);
-#endif
+               pm_runtime_get_sync(vpu_dev);
+               vpu_power_up();
        }
 
-       filp->private_data = (void *)(&vpu_data);
+       filp->private_data = user_data;
        mutex_unlock(&vpu_data.lock);
        return 0;
 }
@@ -369,285 +331,250 @@ static int vpu_open(struct inode *inode, struct file *filp)
 static long vpu_ioctl(struct file *filp, u_int cmd,
                     u_long arg)
 {
-       int ret = 0;
+       int ret = -EINVAL;
+       struct vpu_user_data *user_data = filp->private_data;
+       struct vpu_priv *vpu_data = user_data->vpu_data;
 
        switch (cmd) {
        case VPU_IOC_PHYMEM_ALLOC:
-               {
-                       struct memalloc_record *rec;
+       {
+               struct memalloc_record *rec;
 
-                       rec = kzalloc(sizeof(*rec), GFP_KERNEL);
-                       if (!rec)
-                               return -ENOMEM;
+               rec = kzalloc(sizeof(*rec), GFP_KERNEL);
+               if (!rec)
+                       return -ENOMEM;
 
-                       ret = copy_from_user(&(rec->mem),
-                                            (struct vpu_mem_desc *)arg,
-                                            sizeof(struct vpu_mem_desc));
-                       if (ret) {
-                               kfree(rec);
-                               return -EFAULT;
-                       }
+               if (copy_from_user(&rec->mem,
+                                       (struct vpu_mem_desc *)arg,
+                                       sizeof(struct vpu_mem_desc))) {
+                       kfree(rec);
+                       return -EFAULT;
+               }
 
-                       pr_debug("[ALLOC] mem alloc size = 0x%x\n",
-                                rec->mem.size);
+               dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
+                       rec->mem.size);
 
-                       ret = vpu_alloc_dma_buffer(&(rec->mem));
-                       if (ret == -1) {
-                               kfree(rec);
-                               printk(KERN_ERR
-                                      "Physical memory allocation error!\n");
-                               break;
-                       }
-                       ret = copy_to_user((void __user *)arg, &(rec->mem),
-                                          sizeof(struct vpu_mem_desc));
-                       if (ret) {
-                               kfree(rec);
-                               ret = -EFAULT;
-                               break;
-                       }
+               ret = vpu_alloc_dma_buffer(&rec->mem);
+               if (ret) {
+                       kfree(rec);
+                       return ret;
+               }
+               if (copy_to_user((void __user *)arg, &rec->mem,
+                                       sizeof(struct vpu_mem_desc))) {
+                       kfree(rec);
+                       return -EFAULT;
+               }
 
-                       mutex_lock(&vpu_data.lock);
-                       list_add(&rec->list, &head);
-                       mutex_unlock(&vpu_data.lock);
+               mutex_lock(&vpu_data->lock);
+               list_add(&rec->list, &mem_list);
+               mutex_unlock(&vpu_data->lock);
 
-                       break;
-               }
+               break;
+       }
        case VPU_IOC_PHYMEM_FREE:
-               {
-                       struct memalloc_record *rec, *n;
-                       struct vpu_mem_desc vpu_mem;
-
-                       ret = copy_from_user(&vpu_mem,
-                                            (struct vpu_mem_desc *)arg,
-                                            sizeof(struct vpu_mem_desc));
-                       if (ret)
-                               return -EACCES;
-
-                       pr_debug("[FREE] mem freed cpu_addr = 0x%x\n",
-                                vpu_mem.cpu_addr);
-                       if ((void *)vpu_mem.cpu_addr != NULL)
-                               vpu_free_dma_buffer(&vpu_mem);
-
-                       mutex_lock(&vpu_data.lock);
-                       list_for_each_entry_safe(rec, n, &head, list) {
-                               if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
-                                       /* delete from list */
-                                       list_del(&rec->list);
-                                       kfree(rec);
-                                       break;
-                               }
+       {
+               struct memalloc_record *rec, *n;
+               struct vpu_mem_desc vpu_mem;
+
+               if (copy_from_user(&vpu_mem,
+                                       (struct vpu_mem_desc *)arg,
+                                       sizeof(struct vpu_mem_desc)))
+                       return -EFAULT;
+
+               dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = %p\n",
+                       vpu_mem.cpu_addr);
+               if (vpu_mem.cpu_addr != NULL)
+                       vpu_free_dma_buffer(&vpu_mem);
+
+               mutex_lock(&vpu_data->lock);
+               list_for_each_entry_safe(rec, n, &mem_list, list) {
+                       if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
+                               list_del(&rec->list);
+                               break;
                        }
-                       mutex_unlock(&vpu_data.lock);
-
-                       break;
                }
+               kfree(rec);
+               mutex_unlock(&vpu_data->lock);
+
+               break;
+       }
        case VPU_IOC_WAIT4INT:
-               {
-                       u_long timeout = (u_long) arg;
-                       if (!wait_event_interruptible_timeout
-                           (vpu_queue, irq_status != 0,
-                            msecs_to_jiffies(timeout))) {
-                               printk(KERN_WARNING "VPU blocking: timeout.\n");
-                               ret = -ETIME;
-                       } else if (signal_pending(current)) {
-                               printk(KERN_WARNING
-                                      "VPU interrupt received.\n");
-                               ret = -ERESTARTSYS;
-                       } else
-                               irq_status = 0;
-                       break;
+       {
+               u_long timeout = arg;
+
+               ret = wait_event_interruptible_timeout(vpu_queue,
+                                               irq_status != 0,
+                                               msecs_to_jiffies(timeout));
+               if (ret == 0) {
+                       dev_warn(vpu_dev, "VPU blocking: timeout.\n");
+                       ret = -ETIMEDOUT;
+               } else if (signal_pending(current)) {
+                       dev_warn(vpu_dev, "VPU interrupt received.\n");
+                       ret = -ERESTARTSYS;
+               } else {
+                       irq_status = 0;
                }
+               break;
+       }
        case VPU_IOC_IRAM_SETTING:
-               {
-                       ret = copy_to_user((void __user *)arg, &iram,
-                                          sizeof(struct iram_setting));
-                       if (ret)
-                               ret = -EFAULT;
+               ret = copy_to_user((void __user *)arg, &iram,
+                               sizeof(struct iram_setting));
+               if (ret)
+                       ret = -EFAULT;
 
-                       break;
-               }
+               break;
        case VPU_IOC_CLKGATE_SETTING:
-               {
-                       u32 clkgate_en;
+       {
+               u32 clkgate_en;
 
-                       if (get_user(clkgate_en, (u32 __user *) arg))
-                               return -EFAULT;
+               if (get_user(clkgate_en, (u32 __user *)arg))
+                       return -EFAULT;
 
-                       if (clkgate_en) {
-                               clk_prepare(vpu_clk);
-                               clk_enable(vpu_clk);
-                               atomic_inc(&clk_cnt_from_ioc);
+               mutex_lock(&vpu_data->lock);
+               if (clkgate_en) {
+                       ret = vpu_clk_enable(vpu_data);
+                       if (ret == 0)
+                               user_data->clk_enable_cnt++;
+               } else {
+                       if (user_data->clk_enable_cnt == 0) {
+                               ret = -EINVAL;
                        } else {
-                               clk_disable(vpu_clk);
-                               clk_unprepare(vpu_clk);
-                               atomic_dec(&clk_cnt_from_ioc);
+                               if (--user_data->clk_enable_cnt == 0)
+                                       vpu_clk_disable(vpu_data);
+                               ret = 0;
                        }
-
-                       break;
                }
+               mutex_unlock(&vpu_data->lock);
+               break;
+       }
        case VPU_IOC_GET_SHARE_MEM:
-               {
-                       mutex_lock(&vpu_data.lock);
-                       if (share_mem.cpu_addr != 0) {
-                               ret = copy_to_user((void __user *)arg,
-                                                  &share_mem,
-                                                  sizeof(struct vpu_mem_desc));
-                               mutex_unlock(&vpu_data.lock);
-                               break;
-                       } else {
-                               if (copy_from_user(&share_mem,
-                                                  (struct vpu_mem_desc *)arg,
-                                                sizeof(struct vpu_mem_desc))) {
-                                       mutex_unlock(&vpu_data.lock);
-                                       return -EFAULT;
-                               }
-                               if (vpu_alloc_dma_buffer(&share_mem) == -1)
-                                       ret = -EFAULT;
-                               else {
-                                       if (copy_to_user((void __user *)arg,
-                                                        &share_mem,
-                                                        sizeof(struct
-                                                               vpu_mem_desc)))
-                                               ret = -EFAULT;
-                               }
+               mutex_lock(&vpu_data->lock);
+               if (share_mem.cpu_addr == NULL) {
+                       if (copy_from_user(&share_mem,
+                                               (struct vpu_mem_desc *)arg,
+                                               sizeof(struct vpu_mem_desc))) {
+                               mutex_unlock(&vpu_data->lock);
+                               return -EFAULT;
+                       }
+                       ret = vpu_alloc_dma_buffer(&share_mem);
+                       if (ret) {
+                               mutex_unlock(&vpu_data->lock);
+                               return ret;
                        }
-                       mutex_unlock(&vpu_data.lock);
-                       break;
                }
+               if (copy_to_user((void __user *)arg,
+                                       &share_mem,
+                                       sizeof(struct vpu_mem_desc)))
+                       ret = -EFAULT;
+               else
+                       ret = 0;
+               mutex_unlock(&vpu_data->lock);
+               break;
        case VPU_IOC_REQ_VSHARE_MEM:
-               {
-                       mutex_lock(&vpu_data.lock);
-                       if (vshare_mem.cpu_addr != 0) {
-                               ret = copy_to_user((void __user *)arg,
-                                                  &vshare_mem,
-                                                  sizeof(struct vpu_mem_desc));
-                               mutex_unlock(&vpu_data.lock);
-                               break;
-                       } else {
-                               if (copy_from_user(&vshare_mem,
-                                                  (struct vpu_mem_desc *)arg,
-                                                  sizeof(struct
-                                                         vpu_mem_desc))) {
-                                       mutex_unlock(&vpu_data.lock);
-                                       return -EFAULT;
-                               }
-                               /* vmalloc shared memory if not allocated */
-                               if (!vshare_mem.cpu_addr)
-                                       vshare_mem.cpu_addr =
-                                           (unsigned long)
-                                           vmalloc_user(vshare_mem.size);
-                               if (copy_to_user
-                                    ((void __user *)arg, &vshare_mem,
-                                    sizeof(struct vpu_mem_desc)))
-                                       ret = -EFAULT;
+               mutex_lock(&vpu_data->lock);
+               if (vshare_mem.cpu_addr == NULL) {
+                       if (copy_from_user(&vshare_mem,
+                                               (struct vpu_mem_desc *)arg,
+                                               sizeof(struct
+                                                       vpu_mem_desc))) {
+                               mutex_unlock(&vpu_data->lock);
+                               return -EFAULT;
+                       }
+                       vshare_mem.cpu_addr = vmalloc_user(vshare_mem.size);
+                       if (vshare_mem.cpu_addr == NULL) {
+                               mutex_unlock(&vpu_data->lock);
+                               return -ENOMEM;
                        }
-                       mutex_unlock(&vpu_data.lock);
-                       break;
                }
+               if (copy_to_user((void __user *)arg, &vshare_mem,
+                                       sizeof(struct vpu_mem_desc)))
+                       ret = -EFAULT;
+               else
+                       ret = 0;
+               mutex_unlock(&vpu_data->lock);
+               break;
        case VPU_IOC_GET_WORK_ADDR:
-               {
-                       if (bitwork_mem.cpu_addr != 0) {
-                               ret =
-                                   copy_to_user((void __user *)arg,
-                                                &bitwork_mem,
-                                                sizeof(struct vpu_mem_desc));
-                               break;
-                       } else {
-                               if (copy_from_user(&bitwork_mem,
-                                                  (struct vpu_mem_desc *)arg,
-                                                  sizeof(struct vpu_mem_desc)))
-                                       return -EFAULT;
-
-                               if (vpu_alloc_dma_buffer(&bitwork_mem) == -1)
-                                       ret = -EFAULT;
-                               else if (copy_to_user((void __user *)arg,
-                                                     &bitwork_mem,
-                                                     sizeof(struct
-                                                            vpu_mem_desc)))
-                                       ret = -EFAULT;
-                       }
-                       break;
+               if (bitwork_mem.cpu_addr == 0) {
+                       if (copy_from_user(&bitwork_mem,
+                                               (struct vpu_mem_desc *)arg,
+                                               sizeof(struct vpu_mem_desc)))
+                               return -EFAULT;
+
+                       ret = vpu_alloc_dma_buffer(&bitwork_mem);
+                       if (ret)
+                               return ret;
                }
+               if (copy_to_user((void __user *)arg,
+                                       &bitwork_mem,
+                                       sizeof(struct
+                                               vpu_mem_desc)))
+                       ret = -EFAULT;
+               else
+                       ret = 0;
+               break;
        /*
-        * The following two ioctl is used when user allocates working buffer
-        * and register it to vpu driver.
+        * The following two ioctls are used when user allocates a working buffer
+        * and registers it to vpu driver.
         */
        case VPU_IOC_QUERY_BITWORK_MEM:
-               {
-                       if (copy_to_user((void __user *)arg,
-                                        &bitwork_mem,
-                                        sizeof(struct vpu_mem_desc)))
-                               ret = -EFAULT;
-                       break;
-               }
+               if (copy_to_user((void __user *)arg,
+                                       &bitwork_mem,
+                                       sizeof(struct vpu_mem_desc)))
+                       ret = -EFAULT;
+               else
+                       ret = 0;
+               break;
        case VPU_IOC_SET_BITWORK_MEM:
-               {
-                       if (copy_from_user(&bitwork_mem,
-                                          (struct vpu_mem_desc *)arg,
-                                          sizeof(struct vpu_mem_desc)))
-                               ret = -EFAULT;
-                       break;
-               }
+               if (copy_from_user(&bitwork_mem,
+                                       (struct vpu_mem_desc *)arg,
+                                       sizeof(struct vpu_mem_desc)))
+                       ret = -EFAULT;
+               else
+                       ret = 0;
+               break;
        case VPU_IOC_SYS_SW_RESET:
-               {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
-                       imx_src_reset_vpu();
-#else
-                       if (vpu_plat->reset)
-                               vpu_plat->reset();
-#endif
-
-                       break;
-               }
-       case VPU_IOC_REG_DUMP:
+               ret = vpu_reset();
                break;
+       case VPU_IOC_REG_DUMP:
        case VPU_IOC_PHYMEM_DUMP:
+               ret = 0;
                break;
        case VPU_IOC_PHYMEM_CHECK:
        {
                struct vpu_mem_desc check_memory;
+
                ret = copy_from_user(&check_memory,
-                                    (void __user *)arg,
-                                    sizeof(struct vpu_mem_desc));
+                               (void __user *)arg,
+                               sizeof(struct vpu_mem_desc));
                if (ret != 0) {
-                       printk(KERN_ERR "copy from user failure:%d\n", ret);
+                       dev_err(vpu_dev, "copy from user failure:%d\n", ret);
                        ret = -EFAULT;
                        break;
                }
-               ret = vpu_is_valid_phy_memory((u32)check_memory.phy_addr);
-
-               pr_debug("vpu: memory phy:0x%x %s phy memory\n",
-                      check_memory.phy_addr, (ret ? "is" : "isn't"));
-               /* borrow .size to pass back the result. */
-               check_memory.size = ret;
-               ret = copy_to_user((void __user *)arg, &check_memory,
-                                  sizeof(struct vpu_mem_desc));
-               if (ret) {
+               check_memory.size = 1;
+               if (copy_to_user((void __user *)arg, &check_memory,
+                                       sizeof(struct vpu_mem_desc)))
                        ret = -EFAULT;
-                       break;
-               }
+               else
+                       ret = 0;
                break;
        }
        case VPU_IOC_LOCK_DEV:
-               {
-                       u32 lock_en;
-
-                       if (get_user(lock_en, (u32 __user *) arg))
-                               return -EFAULT;
+       {
+               u32 lock_en;
 
-                       if (lock_en)
-                               mutex_lock(&vpu_data.lock);
-                       else
-                               mutex_unlock(&vpu_data.lock);
+               if (get_user(lock_en, (u32 __user *)arg))
+                       return -EFAULT;
 
-                       break;
-               }
+               if (lock_en)
+                       mutex_lock(&vpu_data->lock);
+               else
+                       mutex_unlock(&vpu_data->lock);
+               ret = 0;
+               break;
+       }
        default:
-               {
-                       printk(KERN_ERR "No such IOCTL, cmd is %d\n", cmd);
-                       ret = -EINVAL;
-                       break;
-               }
+               dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
        }
        return ret;
 }
@@ -658,49 +585,42 @@ static long vpu_ioctl(struct file *filp, u_int cmd,
  */
 static int vpu_release(struct inode *inode, struct file *filp)
 {
-       int i;
        unsigned long timeout;
+       struct vpu_user_data *user_data = filp->private_data;
+       struct vpu_priv *vpu_data = user_data->vpu_data;
 
-       mutex_lock(&vpu_data.lock);
-
-       if (open_count > 0 && !(--open_count)) {
+       mutex_lock(&vpu_data->lock);
 
+       if (open_count > 0 && !--open_count) {
                /* Wait for vpu go to idle state */
-               clk_prepare(vpu_clk);
-               clk_enable(vpu_clk);
+               vpu_clk_enable(vpu_data);
                if (READ_REG(BIT_CUR_PC)) {
 
                        timeout = jiffies + HZ;
                        while (READ_REG(BIT_BUSY_FLAG)) {
                                msleep(1);
                                if (time_after(jiffies, timeout)) {
-                                       printk(KERN_WARNING "VPU timeout during release\n");
+                                       dev_warn(vpu_dev, "VPU timeout during release\n");
                                        break;
                                }
                        }
-                       clk_disable(vpu_clk);
-                       clk_unprepare(vpu_clk);
 
                        /* Clean up interrupt */
-                       cancel_work_sync(&vpu_data.work);
-                       flush_workqueue(vpu_data.workqueue);
+                       cancel_work_sync(&vpu_data->work);
+                       flush_workqueue(vpu_data->workqueue);
                        irq_status = 0;
 
-                       clk_prepare(vpu_clk);
-                       clk_enable(vpu_clk);
                        if (READ_REG(BIT_BUSY_FLAG)) {
-
-                               if (cpu_is_mx51() || cpu_is_mx53()) {
-                                       printk(KERN_ERR
+                               if (vpu_data->soc_data->is_mx51 ||
+                                       vpu_data->soc_data->is_mx53) {
+                                       dev_err(vpu_dev,
                                                "fatal error: can't gate/power off when VPU is busy\n");
-                                       clk_disable(vpu_clk);
-                                       clk_unprepare(vpu_clk);
-                                       mutex_unlock(&vpu_data.lock);
-                                       return -EFAULT;
+                                       vpu_clk_disable(vpu_data);
+                                       mutex_unlock(&vpu_data->lock);
+                                       return -EBUSY;
                                }
-
-#ifdef CONFIG_SOC_IMX6Q
-                               if (cpu_is_mx6dl() || cpu_is_mx6q()) {
+                               if (vpu_data->soc_data->is_mx6dl ||
+                                       vpu_data->soc_data->is_mx6q) {
                                        WRITE_REG(0x11, 0x10F0);
                                        timeout = jiffies + HZ;
                                        while (READ_REG(0x10F4) != 0x77) {
@@ -710,53 +630,35 @@ static int vpu_release(struct inode *inode, struct file *filp)
                                        }
 
                                        if (READ_REG(0x10F4) != 0x77) {
-                                               printk(KERN_ERR
+                                               dev_err(vpu_dev,
                                                        "fatal error: can't gate/power off when VPU is busy\n");
                                                WRITE_REG(0x0, 0x10F0);
-                                               clk_disable(vpu_clk);
-                                               clk_unprepare(vpu_clk);
-                                               mutex_unlock(&vpu_data.lock);
-                                               return -EFAULT;
-                                       } else {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
-                                               imx_src_reset_vpu();
-#else
-                                               if (vpu_plat->reset)
-                                                       vpu_plat->reset();
-#endif
+                                               vpu_clk_disable(vpu_data);
+                                               mutex_unlock(&vpu_data->lock);
+                                               return -EBUSY;
                                        }
+                                       vpu_reset();
                                }
-#endif
                        }
                }
-               clk_disable(vpu_clk);
-               clk_unprepare(vpu_clk);
 
                vpu_free_buffers();
 
                /* Free shared memory when vpu device is idle */
                vpu_free_dma_buffer(&share_mem);
                share_mem.cpu_addr = 0;
-               vfree((void *)vshare_mem.cpu_addr);
+               vfree(vshare_mem.cpu_addr);
                vshare_mem.cpu_addr = 0;
 
-               vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
-               for (i = 0; i < vpu_clk_usercount; i++) {
-                       clk_disable(vpu_clk);
-                       clk_unprepare(vpu_clk);
-                       atomic_dec(&clk_cnt_from_ioc);
-               }
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-               if (!IS_ERR(vpu_regulator))
-                       regulator_disable(vpu_regulator);
-#else
-               imx_gpc_power_up_pu(false);
-               pm_runtime_put_sync_suspend(&vpu_pdev->dev);
-#endif
+               if (user_data->clk_enable_cnt)
+                       vpu_clk_disable(vpu_data);
 
+               vpu_clk_disable(vpu_data);
+               vpu_power_down();
+               pm_runtime_put_sync_suspend(vpu_dev);
+               devm_kfree(vpu_dev, user_data);
        }
-       mutex_unlock(&vpu_data.lock);
+       mutex_unlock(&vpu_data->lock);
 
        return 0;
 }
@@ -767,8 +669,9 @@ static int vpu_release(struct inode *inode, struct file *filp)
  */
 static int vpu_fasync(int fd, struct file *filp, int mode)
 {
-       struct vpu_priv *dev = (struct vpu_priv *)filp->private_data;
-       return fasync_helper(fd, filp, mode, &dev->async_queue);
+       struct vpu_user_data *user_data = filp->private_data;
+       struct vpu_priv *vpu_data = user_data->vpu_data;
+       return fasync_helper(fd, filp, mode, &vpu_data->async_queue);
 }
 
 /*!
@@ -779,7 +682,7 @@ static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
 {
        unsigned long pfn;
 
-       vm->vm_flags |= VM_IO | VM_RESERVED;
+       vm->vm_flags |= VM_IO;
        /*
         * Since vpu registers have been mapped with ioremap() at probe
         * which L_PTE_XN is 1, and the same physical address must be
@@ -788,10 +691,11 @@ static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
         */
        vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
        pfn = phy_vpu_base_addr >> PAGE_SHIFT;
-       pr_debug("size=0x%x,  page no.=0x%x\n",
-                (int)(vm->vm_end - vm->vm_start), (int)pfn);
-       return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end - vm->vm_start,
-                              vm->vm_page_prot) ? -EAGAIN : 0;
+       dev_dbg(vpu_dev, "size=0x%lx, page no.=0x%lx\n",
+                vm->vm_end - vm->vm_start, pfn);
+       return remap_pfn_range(vm, vm->vm_start, pfn,
+                       vm->vm_end - vm->vm_start,
+                       vm->vm_page_prot) ? -EAGAIN : 0;
 }
 
 /*!
@@ -800,19 +704,16 @@ static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
  */
 static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
 {
-       int request_size;
-       request_size = vm->vm_end - vm->vm_start;
+       size_t request_size = vm->vm_end - vm->vm_start;
 
-       pr_debug(" start=0x%x, pgoff=0x%x, size=0x%x\n",
-                (unsigned int)(vm->vm_start), (unsigned int)(vm->vm_pgoff),
-                request_size);
+       dev_dbg(vpu_dev, "start=0x%08lx, pgoff=0x%08lx, size=%zx\n",
+               vm->vm_start, vm->vm_pgoff, request_size);
 
-       vm->vm_flags |= VM_IO | VM_RESERVED;
+       vm->vm_flags |= VM_IO;
        vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
 
        return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
                               request_size, vm->vm_page_prot) ? -EAGAIN : 0;
-
 }
 
 /* !
@@ -821,11 +722,10 @@ static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
  */
 static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
 {
-       int ret = -EINVAL;
+       int ret;
 
        ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
        vm->vm_flags |= VM_IO;
-
        return ret;
 }
 /*!
@@ -836,7 +736,7 @@ static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
 {
        unsigned long offset;
 
-       offset = vshare_mem.cpu_addr >> PAGE_SHIFT;
+       offset = (unsigned long)vshare_mem.cpu_addr >> PAGE_SHIFT;
 
        if (vm->vm_pgoff && (vm->vm_pgoff == offset))
                return vpu_map_vshare_mem(fp, vm);
@@ -846,7 +746,7 @@ static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
                return vpu_map_hwregs(fp, vm);
 }
 
-const struct file_operations vpu_fops = {
+static const struct file_operations vpu_fops = {
        .owner = THIS_MODULE,
        .open = vpu_open,
        .unlocked_ioctl = vpu_ioctl,
@@ -855,6 +755,35 @@ const struct file_operations vpu_fops = {
        .mmap = vpu_mmap,
 };
 
+static const struct mxc_vpu_soc_data imx6dl_vpu_data = {
+       .regulator_required = 1,
+       .vpu_pwr_mgmnt = 1,
+       .has_jpu = 1,
+};
+
+static const struct mxc_vpu_soc_data imx6q_vpu_data = {
+       .quirk_subblk_en = 1,
+       .regulator_required = 1,
+       .vpu_pwr_mgmnt = 1,
+       .has_jpu = 1,
+};
+
+static const struct mxc_vpu_soc_data imx53_vpu_data = {
+};
+
+static const struct mxc_vpu_soc_data imx51_vpu_data = {
+       .vpu_pwr_mgmnt = 1,
+};
+
+static const struct of_device_id vpu_of_match[] = {
+       { .compatible = "fsl,imx6dl-vpu", .data = &imx6dl_vpu_data, },
+       { .compatible = "fsl,imx6q-vpu", .data = &imx6q_vpu_data, },
+       { .compatible = "fsl,imx53-vpu", .data = &imx53_vpu_data, },
+       { .compatible = "fsl,imx51-vpu", .data = &imx51_vpu_data, },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vpu_of_match);
+
 /*!
  * This function is called by the driver framework to initialize the vpu device.
  * @param   dev The device structure for the vpu passed in by the framework.
@@ -866,32 +795,39 @@ static int vpu_dev_probe(struct platform_device *pdev)
        struct device *temp_class;
        struct resource *res;
        unsigned long addr = 0;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        struct device_node *np = pdev->dev.of_node;
        u32 iramsize;
+       struct vpu_priv *drv_data;
+       const struct of_device_id *of_id = of_match_device(vpu_of_match,
+                                                       &pdev->dev);
+       const struct mxc_vpu_soc_data *soc_data = of_id->data;
 
-       err = of_property_read_u32(np, "iramsize", (u32 *)&iramsize);
-       if (!err && iramsize)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
-       {
+       drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
+       if (drv_data == NULL)
+               return -ENOMEM;
+
+       drv_data->soc_data = soc_data;
+       INIT_LIST_HEAD(&drv_data->users);
+
+       init_waitqueue_head(&vpu_queue);
+
+       err = of_property_read_u32(np, "iramsize", &iramsize);
+       if (!err && iramsize) {
                iram_pool = of_get_named_gen_pool(np, "iram", 0);
                if (!iram_pool) {
-                       printk(KERN_ERR "iram pool not available\n");
+                       dev_err(&pdev->dev, "iram pool not available\n");
                        return -ENOMEM;
                }
 
                iram_base = gen_pool_alloc(iram_pool, iramsize);
                if (!iram_base) {
-                       printk(KERN_ERR "unable to alloc iram\n");
+                       dev_err(&pdev->dev, "unable to alloc iram\n");
                        return -ENOMEM;
                }
 
                addr = gen_pool_virt_to_phys(iram_pool, iram_base);
        }
-#else
-               iram_alloc(iramsize, &addr);
-#endif
+
        if (addr == 0)
                iram.start = iram.end = 0;
        else {
@@ -899,34 +835,22 @@ static int vpu_dev_probe(struct platform_device *pdev)
                iram.end = addr + iramsize - 1;
        }
 
-       vpu_pdev = pdev;
-#else
-
-       vpu_plat = pdev->dev.platform_data;
-
-       if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
-               iram_alloc(vpu_plat->iram_size, &addr);
-       if (addr == 0)
-               iram.start = iram.end = 0;
-       else {
-               iram.start = addr;
-               iram.end = addr +  vpu_plat->iram_size - 1;
-       }
-#endif
+       vpu_dev = &pdev->dev;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
        if (!res) {
-               printk(KERN_ERR "vpu: unable to get vpu base addr\n");
+               dev_err(vpu_dev, "vpu: unable to get vpu base addr\n");
                return -ENODEV;
        }
        phy_vpu_base_addr = res->start;
-       vpu_base = ioremap(res->start, res->end - res->start);
+       vpu_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(vpu_base))
+               return PTR_ERR(vpu_base);
 
        vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
        if (vpu_major < 0) {
-               printk(KERN_ERR "vpu: unable to get a major for VPU\n");
-               err = -EBUSY;
-               goto error;
+               dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
+               return vpu_major;
        }
 
        vpu_class = class_create(THIS_MODULE, "mxc_vpu");
@@ -944,236 +868,180 @@ static int vpu_dev_probe(struct platform_device *pdev)
 
        vpu_clk = clk_get(&pdev->dev, "vpu_clk");
        if (IS_ERR(vpu_clk)) {
-               err = -ENOENT;
+               err = PTR_ERR(vpu_clk);
                goto err_out_class;
        }
 
        vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
        if (vpu_ipi_irq < 0) {
-               printk(KERN_ERR "vpu: unable to get vpu interrupt\n");
-               err = -ENXIO;
+               dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
+               err = vpu_ipi_irq;
                goto err_out_class;
        }
        err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
-                         (void *)(&vpu_data));
+                         &vpu_data);
        if (err)
                goto err_out_class;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-       vpu_regulator = regulator_get(NULL, "cpu_vddvpu");
+
+       vpu_regulator = devm_regulator_get(vpu_dev, "pu");
        if (IS_ERR(vpu_regulator)) {
-               if (!(cpu_is_mx51() || cpu_is_mx53())) {
-                       printk(KERN_ERR
-                               "%s: failed to get vpu regulator\n", __func__);
+               if (drv_data->soc_data->regulator_required) {
+                       dev_err(vpu_dev, "failed to get vpu power\n");
                        goto err_out_class;
                } else {
                        /* regulator_get will return error on MX5x,
-                        * just igore it everywhere*/
-                       printk(KERN_WARNING
-                               "%s: failed to get vpu regulator\n", __func__);
+                        * just ignore it everywhere
+                        */
+                       dev_warn(vpu_dev, "failed to get vpu power\n");
                }
        }
-#endif
 
-#ifdef MXC_VPU_HAS_JPU
-       vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
-       if (vpu_jpu_irq < 0) {
-               printk(KERN_ERR "vpu: unable to get vpu jpu interrupt\n");
-               err = -ENXIO;
-               free_irq(vpu_ipi_irq, &vpu_data);
-               goto err_out_class;
-       }
-       err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
-                         "VPU_JPG_IRQ", (void *)(&vpu_data));
-       if (err) {
-               free_irq(vpu_ipi_irq, &vpu_data);
-               goto err_out_class;
+       platform_set_drvdata(pdev, drv_data);
+
+       if (drv_data->soc_data->has_jpu) {
+               vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
+               if (vpu_jpu_irq < 0) {
+                       dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
+                       err = vpu_jpu_irq;
+                       goto err_out_class;
+               }
+               err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
+                               "VPU_JPG_IRQ", &vpu_data);
+               if (err)
+                       goto err_out_class;
        }
-#endif
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        pm_runtime_enable(&pdev->dev);
-#endif
 
        vpu_data.workqueue = create_workqueue("vpu_wq");
        INIT_WORK(&vpu_data.work, vpu_worker_callback);
        mutex_init(&vpu_data.lock);
-       printk(KERN_INFO "VPU initialized\n");
-       goto out;
+       dev_info(vpu_dev, "VPU initialized\n");
+       return 0;
 
 err_out_class:
        device_destroy(vpu_class, MKDEV(vpu_major, 0));
        class_destroy(vpu_class);
 err_out_chrdev:
        unregister_chrdev(vpu_major, "mxc_vpu");
-error:
-       iounmap(vpu_base);
-out:
        return err;
 }
 
 static int vpu_dev_remove(struct platform_device *pdev)
 {
+       struct vpu_priv *vpu_data = platform_get_drvdata(pdev);
+
+       pm_runtime_disable(&pdev->dev);
+
        free_irq(vpu_ipi_irq, &vpu_data);
 #ifdef MXC_VPU_HAS_JPU
        free_irq(vpu_jpu_irq, &vpu_data);
 #endif
-       cancel_work_sync(&vpu_data.work);
-       flush_workqueue(vpu_data.workqueue);
-       destroy_workqueue(vpu_data.workqueue);
+       cancel_work_sync(&vpu_data->work);
+       flush_workqueue(vpu_data->workqueue);
+       destroy_workqueue(vpu_data->workqueue);
 
        iounmap(vpu_base);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        if (iram.start)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
                gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);
-#else
-               iram_free(iram.start, iram.end-iram.start+1);
-#endif
-#else
-       if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
-               iram_free(iram.start,  vpu_plat->iram_size);
-#endif
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-       if (!IS_ERR(vpu_regulator))
-               regulator_put(vpu_regulator);
-#endif
+       if (vpu_major > 0) {
+               device_destroy(vpu_class, MKDEV(vpu_major, 0));
+               class_destroy(vpu_class);
+               unregister_chrdev(vpu_major, "mxc_vpu");
+               vpu_major = 0;
+       }
+
+       vpu_free_dma_buffer(&bitwork_mem);
+       vpu_free_dma_buffer(&pic_para_mem);
+       vpu_free_dma_buffer(&user_data_mem);
+
+       /* reset VPU state */
+       vpu_power_up();
+       vpu_clk_enable(vpu_data);
+       vpu_reset();
+       vpu_clk_disable(vpu_data);
+       vpu_power_down();
+
+       clk_put(vpu_clk);
        return 0;
 }
 
 #ifdef CONFIG_PM
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
 static int vpu_suspend(struct device *dev)
-#else
-static int vpu_suspend(struct platform_device *pdev, pm_message_t state)
-#endif
 {
-       int i;
+       struct vpu_priv *vpu_data = dev_get_drvdata(dev);
        unsigned long timeout;
 
-       mutex_lock(&vpu_data.lock);
-       if (open_count == 0) {
-               /* VPU is released (all instances are freed),
-                * clock is already off, context is no longer needed,
-                * power is already off on MX6,
-                * gate power on MX51 */
-               if (cpu_is_mx51()) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-                       if (vpu_plat->pg)
-                               vpu_plat->pg(1);
-#endif
-               }
-       } else {
+       mutex_lock(&vpu_data->lock);
+
+       if (open_count) {
                /* Wait for vpu go to idle state, suspect vpu cannot be changed
                   to idle state after about 1 sec */
                timeout = jiffies + HZ;
-               clk_prepare(vpu_clk);
-               clk_enable(vpu_clk);
                while (READ_REG(BIT_BUSY_FLAG)) {
                        msleep(1);
                        if (time_after(jiffies, timeout)) {
-                               clk_disable(vpu_clk);
-                               clk_unprepare(vpu_clk);
-                               mutex_unlock(&vpu_data.lock);
+                               mutex_unlock(&vpu_data->lock);
                                return -EAGAIN;
                        }
                }
-               clk_disable(vpu_clk);
-               clk_unprepare(vpu_clk);
-
-               /* Make sure clock is disabled before suspend */
-               vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
-               for (i = 0; i < vpu_clk_usercount; i++) {
-                       clk_disable(vpu_clk);
-                       clk_unprepare(vpu_clk);
-               }
 
-               if (cpu_is_mx53()) {
-                       mutex_unlock(&vpu_data.lock);
+               if (vpu_data->soc_data->is_mx53) {
+                       mutex_unlock(&vpu_data->lock);
                        return 0;
                }
 
                if (bitwork_mem.cpu_addr != 0) {
-                       clk_prepare(vpu_clk);
-                       clk_enable(vpu_clk);
+                       int i;
+
                        /* Save 64 registers from BIT_CODE_BUF_ADDR */
                        for (i = 0; i < 64; i++)
                                regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
                        pc_before_suspend = READ_REG(BIT_CUR_PC);
-                       clk_disable(vpu_clk);
-                       clk_unprepare(vpu_clk);
                }
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-               if (vpu_plat->pg)
-                       vpu_plat->pg(1);
-#endif
-
+               vpu_clk_disable(vpu_data);
                /* If VPU is working before suspend, disable
-                * regulator to make usecount right. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-               if (!IS_ERR(vpu_regulator))
-                       regulator_disable(vpu_regulator);
-#else
-               imx_gpc_power_up_pu(false);
-#endif
+                * regulator to make usecount right.
+                */
+               vpu_power_down();
        }
 
-       mutex_unlock(&vpu_data.lock);
+       mutex_unlock(&vpu_data->lock);
        return 0;
 }
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
 static int vpu_resume(struct device *dev)
-#else
-static int vpu_resume(struct platform_device *pdev)
-#endif
 {
        int i;
+       struct vpu_priv *vpu_data = dev_get_drvdata(dev);
 
-       mutex_lock(&vpu_data.lock);
-       if (open_count == 0) {
-               /* VPU is released (all instances are freed),
-                * clock should be kept off, context is no longer needed,
-                * power should be kept off on MX6,
-                * disable power gating on MX51 */
-               if (cpu_is_mx51()) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-                       if (vpu_plat->pg)
-                               vpu_plat->pg(0);
-#endif
+       mutex_lock(&vpu_data->lock);
+
+       if (open_count) {
+               if (vpu_data->soc_data->is_mx53) {
+                       vpu_clk_enable(vpu_data);
+                       goto out;
                }
-       } else {
-               if (cpu_is_mx53())
-                       goto recover_clk;
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
                /* If VPU is working before suspend, enable
-                * regulator to make usecount right. */
-               if (!IS_ERR(vpu_regulator))
-                       regulator_enable(vpu_regulator);
-
-               if (vpu_plat->pg)
-                       vpu_plat->pg(0);
-#else
-               imx_gpc_power_up_pu(true);
-#endif
+                * regulator to make usecount right.
+                */
+               vpu_power_up();
 
-               if (bitwork_mem.cpu_addr != 0) {
-                       u32 *p = (u32 *) bitwork_mem.cpu_addr;
+               if (bitwork_mem.cpu_addr != NULL) {
+                       u32 *p = bitwork_mem.cpu_addr;
                        u32 data, pc;
                        u16 data_hi;
                        u16 data_lo;
 
-                       clk_prepare(vpu_clk);
-                       clk_enable(vpu_clk);
+                       vpu_clk_enable(vpu_data);
 
                        pc = READ_REG(BIT_CUR_PC);
                        if (pc) {
-                               printk(KERN_WARNING "Not power off after suspend (PC=0x%x)\n", pc);
-                               clk_disable(vpu_clk);
-                               clk_unprepare(vpu_clk);
-                               goto recover_clk;
+                               dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);
+                               goto out;
                        }
 
                        /* Restore registers */
@@ -1183,9 +1051,8 @@ static int vpu_resume(struct platform_device *pdev)
                        WRITE_REG(0x0, BIT_RESET_CTRL);
                        WRITE_REG(0x0, BIT_CODE_RUN);
                        /* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
-#ifdef CONFIG_SOC_IMX6Q
-                       WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
-#endif
+                       if (vpu_data->soc_data->quirk_subblk_en)
+                               WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
 
                        /*
                         * Re-load boot code, from the codebuffer in external RAM.
@@ -1214,136 +1081,36 @@ static int vpu_resume(struct platform_device *pdev)
                                while (READ_REG(BIT_BUSY_FLAG))
                                        ;
                        } else {
-                               printk(KERN_WARNING "PC=0 before suspend\n");
+                               dev_warn(vpu_dev, "PC=0 before suspend\n");
                        }
-                       clk_disable(vpu_clk);
-                       clk_unprepare(vpu_clk);
-               }
-
-recover_clk:
-               /* Recover vpu clock */
-               for (i = 0; i < vpu_clk_usercount; i++) {
-                       clk_prepare(vpu_clk);
-                       clk_enable(vpu_clk);
                }
        }
-
-       mutex_unlock(&vpu_data.lock);
-       return 0;
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
-static int vpu_runtime_suspend(struct device *dev)
-{
-       release_bus_freq(BUS_FREQ_HIGH);
-       return 0;
-}
-
-static int vpu_runtime_resume(struct device *dev)
-{
-       request_bus_freq(BUS_FREQ_HIGH);
+out:
+       mutex_unlock(&vpu_data->lock);
        return 0;
 }
 
-static const struct dev_pm_ops vpu_pm_ops = {
-       SET_RUNTIME_PM_OPS(vpu_runtime_suspend, vpu_runtime_resume, NULL)
-       SET_SYSTEM_SLEEP_PM_OPS(vpu_suspend, vpu_resume)
-};
-#endif
-
+static SIMPLE_DEV_PM_OPS(vpu_pm_ops, vpu_suspend, vpu_resume);
+#define VPU_PM_OPS &vpu_pm_ops
 #else
-#define        vpu_suspend     NULL
-#define        vpu_resume      NULL
-#endif                         /* !CONFIG_PM */
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
-static const struct of_device_id vpu_of_match[] = {
-       { .compatible = "fsl,imx6-vpu", },
-       {/* sentinel */}
-};
-MODULE_DEVICE_TABLE(of, vpu_of_match);
-#endif
+#define VPU_PM_OPS NULL
+#endif /* !CONFIG_PM */
 
 /*! Driver definition
  *
  */
 static struct platform_driver mxcvpu_driver = {
        .driver = {
-                  .name = "mxc_vpu",
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
-                  .of_match_table = vpu_of_match,
-#ifdef CONFIG_PM
-                  .pm = &vpu_pm_ops,
-#endif
-#endif
-                  },
+               .name = "mxc_vpu",
+               .of_match_table = vpu_of_match,
+               .pm = VPU_PM_OPS,
+       },
        .probe = vpu_dev_probe,
        .remove = vpu_dev_remove,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-       .suspend = vpu_suspend,
-       .resume = vpu_resume,
-#endif
 };
 
-static int __init vpu_init(void)
-{
-       int ret = platform_driver_register(&mxcvpu_driver);
-
-       init_waitqueue_head(&vpu_queue);
-
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-       memblock_analyze();
-       top_address_DRAM = memblock_end_of_DRAM_with_reserved();
-#endif
-
-       return ret;
-}
-
-static void __exit vpu_exit(void)
-{
-       if (vpu_major > 0) {
-               device_destroy(vpu_class, MKDEV(vpu_major, 0));
-               class_destroy(vpu_class);
-               unregister_chrdev(vpu_major, "mxc_vpu");
-               vpu_major = 0;
-       }
-
-       vpu_free_dma_buffer(&bitwork_mem);
-       vpu_free_dma_buffer(&pic_para_mem);
-       vpu_free_dma_buffer(&user_data_mem);
-
-       /* reset VPU state */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
-       if (!IS_ERR(vpu_regulator))
-               regulator_enable(vpu_regulator);
-       clk_prepare(vpu_clk);
-       clk_enable(vpu_clk);
-       if (vpu_plat->reset)
-               vpu_plat->reset();
-       clk_disable(vpu_clk);
-       clk_unprepare(vpu_clk);
-       if (!IS_ERR(vpu_regulator))
-               regulator_disable(vpu_regulator);
-#else
-       imx_gpc_power_up_pu(true);
-       clk_prepare(vpu_clk);
-       clk_enable(vpu_clk);
-       imx_src_reset_vpu();
-       clk_disable(vpu_clk);
-       clk_unprepare(vpu_clk);
-       imx_gpc_power_up_pu(false);
-#endif
-
-       clk_put(vpu_clk);
-
-       platform_driver_unregister(&mxcvpu_driver);
-       return;
-}
+module_platform_driver(mxcvpu_driver);
 
 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
 MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
 MODULE_LICENSE("GPL");
-
-module_init(vpu_init);
-module_exit(vpu_exit);