comment "Qualcomm MSM Camera And Video"
+source "drivers/media/platform/msm/camss-8x16/Kconfig"
source "drivers/media/platform/msm/vidc/Kconfig"
# based on V4L2.
#
+obj-$(CONFIG_MSM_CAMSS_V4L2) += camss-8x16/
obj-$(CONFIG_MSM_VIDC_V4L2) += vidc/
--- /dev/null
+#
+# CAMERA SUB-SYTEM
+#
+
+menuconfig MSM_CAMSS_V4L2
+ tristate "Qualcomm MSM 8x16 V4L2 based camera sub-system driver"
+ depends on ARCH_QCOM && VIDEO_V4L2
+# select ARM64_DMA_USE_IOMMU
+ select VIDEOBUF2_DMA_CONTIG
--- /dev/null
+# Makefile for Qualcomm camss driver
+
+ccflags-y += -Idrivers/media/platform/msm/camss
+msm-camss-objs += \
+ camss.o \
+ csid.o \
+ csiphy.o \
+ ispif.o \
+ vfe.o \
+ video.o \
+
+obj-$(CONFIG_MSM_CAMSS_V4L2) += msm-camss.o
--- /dev/null
+/*
+ * camss.c
+ *
+ * Qualcomm MSM Camera Subsystem - Core
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-of.h>
+
+#include "camss.h"
+
+#define CAMSS_CSIPHY_NUM 2
+#define CAMSS_CSID_NUM 2
+
+static struct resources csiphy_res[] = {
+ /* CSIPHY0 */
+ {
+ .regulator = { NULL },
+ .clock = { "csiphy0_timer_src_clk", "csiphy0_timer_clk", "camss_ahb_src" },
+ .clock_rate = { 200000000, 0, 0 },
+ .reg = { "csiphy0", "csiphy0_clk_mux" },
+ .interrupt = { "csiphy0" }
+ },
+
+ /* CSIPHY1 */
+ {
+ .regulator = { NULL },
+ .clock = { "csiphy1_timer_src_clk", "csiphy1_timer_clk", "camss_ahb_src" },
+ .clock_rate = { 200000000, 0, 0 },
+ .reg = { "csiphy1", "csiphy1_clk_mux" },
+ .interrupt = { "csiphy1" }
+ }
+};
+
+static struct resources csid_res[] = {
+ /* CSID0 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "camss_top_ahb_clk", "ispif_ahb_clk", "csi0_ahb_clk",
+ "csi0_src_clk", "csi0_clk", "csi0_phy_clk",
+ "csi0_pix_clk", "csi0_rdi_clk", "camss_ahb_clk" },
+ .clock_rate = { 0, 0, 0, 200000000, 0, 0, 0, 0, 0 },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" }
+ },
+
+ /* CSID1 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "camss_top_ahb_clk", "ispif_ahb_clk", "csi1_ahb_clk",
+ "csi1_src_clk", "csi1_clk", "csi1_phy_clk",
+ "csi1_pix_clk", "csi1_rdi_clk", "camss_ahb_clk" },
+ .clock_rate = { 0, 0, 0, 200000000, 0, 0, 0, 0, 0 },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" }
+ },
+};
+
+static struct resources_ispif ispif_res = {
+ /* ISPIF */
+ .clock = { "camss_ahb_src", "ispif_ahb_clk", "csi0_src_clk",
+ "csi0_clk", "csi0_pix_clk", "csi0_rdi_clk",
+ "csi1_src_clk", "csi1_clk", "csi1_pix_clk",
+ "csi1_rdi_clk", "vfe_clk_src", "camss_vfe_vfe_clk",
+ "camss_csi_vfe_clk"
+ },
+ .clock_for_reset = { 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ .reg = { "ispif", "csi_clk_mux" },
+ .interrupt = "ispif"
+
+};
+
+/*
+ * camss_pipeline_pm_use_count - Count the number of users of a pipeline
+ * @entity: The entity
+ *
+ * Return the total number of users of all video device nodes in the pipeline.
+ */
+static int camss_pipeline_pm_use_count(struct media_entity *entity)
+{
+ struct media_entity_graph graph;
+ int use = 0;
+
+ media_entity_graph_walk_start(&graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+ if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
+ use += entity->use_count;
+ }
+
+ return use;
+}
+
+/*
+ * camss_pipeline_pm_power_one - Apply power change to an entity
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Change the entity use count by @change. If the entity is a subdev update its
+ * power state by calling the core::s_power operation when the use count goes
+ * from 0 to != 0 or from != 0 to 0.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int camss_pipeline_pm_power_one(struct media_entity *entity, int change)
+{
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = media_entity_type(entity) == MEDIA_ENT_T_V4L2_SUBDEV
+ ? media_entity_to_v4l2_subdev(entity) : NULL;
+
+ if (entity->use_count == 0 && change > 0 && subdev != NULL) {
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ if (entity->use_count == 0 && change < 0 && subdev != NULL)
+ v4l2_subdev_call(subdev, core, s_power, 0);
+
+ return 0;
+}
+
+/*
+ * camss_pipeline_pm_power - Apply power change to all entities in a pipeline
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Walk the pipeline to update the use count and the power state of all non-node
+ * entities.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int camss_pipeline_pm_power(struct media_entity *entity, int change)
+{
+ struct media_entity_graph graph;
+ struct media_entity *first = entity;
+ int ret = 0;
+
+ if (!change)
+ return 0;
+
+ media_entity_graph_walk_start(&graph, entity);
+
+ while (!ret && (entity = media_entity_graph_walk_next(&graph)))
+ if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+ ret = camss_pipeline_pm_power_one(entity, change);
+
+ if (!ret)
+ return 0;
+
+ media_entity_graph_walk_start(&graph, first);
+
+ while ((first = media_entity_graph_walk_next(&graph))
+ && first != entity)
+ if (media_entity_type(first) != MEDIA_ENT_T_DEVNODE)
+ camss_pipeline_pm_power_one(first, -change);
+
+ return ret;
+}
+
+/*
+ * msm_camss_pipeline_pm_use - Update the use count of an entity
+ * @entity: The entity
+ * @use: Use (1) or stop using (0) the entity
+ *
+ * Update the use count of all entities in the pipeline and power entities on or
+ * off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. No failure can occur when the use parameter is
+ * set to 0.
+ */
+int msm_camss_pipeline_pm_use(struct media_entity *entity, int use)
+{
+ int change = use ? 1 : -1;
+ int ret;
+
+ mutex_lock(&entity->parent->graph_mutex);
+
+ /* Apply use count to node. */
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ /* Apply power change to connected non-nodes. */
+ ret = camss_pipeline_pm_power(entity, change);
+ if (ret < 0)
+ entity->use_count -= change;
+
+ mutex_unlock(&entity->parent->graph_mutex);
+
+ return ret;
+}
+
+/*
+ * camss_pipeline_link_notify - Link management notification callback
+ * @link: The link
+ * @flags: New link flags that will be applied
+ * @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*)
+ *
+ * React to link management on powered pipelines by updating the use count of
+ * all entities in the source and sink sides of the link. Entities are powered
+ * on or off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. This function will not fail for disconnection
+ * events.
+ */
+static int camss_pipeline_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct media_entity *source = link->source->entity;
+ struct media_entity *sink = link->sink->entity;
+ int source_use = camss_pipeline_pm_use_count(source);
+ int sink_use = camss_pipeline_pm_use_count(sink);
+ int ret;
+
+ if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ !(flags & MEDIA_LNK_FL_ENABLED)) {
+ /* Powering off entities is assumed to never fail. */
+ camss_pipeline_pm_power(source, -sink_use);
+ camss_pipeline_pm_power(sink, -source_use);
+ return 0;
+ }
+
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ (flags & MEDIA_LNK_FL_ENABLED)) {
+
+ ret = camss_pipeline_pm_power(source, sink_use);
+ if (ret < 0)
+ return ret;
+
+ ret = camss_pipeline_pm_power(sink, source_use);
+ if (ret < 0)
+ camss_pipeline_pm_power(source, -sink_use);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int camss_alloc(struct device *dev, struct camss **c)
+{
+ struct camss *camss;
+
+ *c = devm_kzalloc(dev, sizeof(**c), GFP_KERNEL);
+ if (!*c) {
+ dev_err(dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ camss = *c;
+ camss->csiphy_num = CAMSS_CSIPHY_NUM;
+ camss->csiphy = devm_kzalloc(dev,
+ camss->csiphy_num * sizeof(*camss->csiphy),
+ GFP_KERNEL);
+
+ camss->csid_num = CAMSS_CSID_NUM;
+ camss->csid = devm_kzalloc(dev,
+ camss->csid_num * sizeof(*camss->csid),
+ GFP_KERNEL);
+ if (!camss->csiphy || !camss->csid) {
+ dev_err(dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int camss_of_parse_node(struct device *dev, struct device_node *node,
+ struct camss_async_subdev *csd)
+{
+ struct camss_csiphy_lanes_cfg *lncfg = &csd->interface.csi2.lanecfg;
+ int *settle_cnt = &csd->interface.csi2.settle_cnt;
+ struct v4l2_of_endpoint vep;
+ unsigned int i;
+
+ v4l2_of_parse_endpoint(node, &vep);
+
+ dev_dbg(dev, "parsing endpoint %s\n", node->full_name);
+
+ csd->interface.id = vep.base.port;
+
+ lncfg->clk.pos = vep.bus.mipi_csi2.clock_lane;
+ lncfg->clk.pol = vep.bus.mipi_csi2.lane_polarities[0];
+ dev_dbg(dev, "clock lane polarity %u, pos %u\n",
+ lncfg->clk.pol, lncfg->clk.pos);
+
+ lncfg->num_data = vep.bus.mipi_csi2.num_data_lanes;
+
+ lncfg->data = devm_kzalloc(dev, lncfg->num_data * sizeof(*lncfg->data),
+ GFP_KERNEL);
+ if (!lncfg->data) {
+ dev_err(dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < lncfg->num_data; i++) {
+ lncfg->data[i].pos = vep.bus.mipi_csi2.data_lanes[i];
+ lncfg->data[i].pol =
+ vep.bus.mipi_csi2.lane_polarities[i + 1];
+ dev_dbg(dev, "data lane %u polarity %u, pos %u\n", i,
+ lncfg->data[i].pol, lncfg->data[i].pos);
+ }
+
+ of_property_read_u32(node, "qcom,settle-cnt", settle_cnt);
+
+ return 0;
+}
+
+static int camss_of_parse_nodes(struct device *dev,
+ struct v4l2_async_notifier *notifier)
+{
+ struct device_node *node = NULL;
+ int size, i;
+ int ret;
+
+ while ((node = of_graph_get_next_endpoint(dev->of_node, node))) {
+ notifier->num_subdevs++;
+ }
+ dev_err(dev, "notifier->num_subdevs = %u\n", notifier->num_subdevs);
+
+ size = sizeof(*notifier->subdevs) * notifier->num_subdevs;
+ notifier->subdevs = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!notifier->subdevs) {
+ dev_err(dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ i = 0;
+ while ((node = of_graph_get_next_endpoint(dev->of_node, node))) {
+ struct camss_async_subdev *csd;
+
+ csd = devm_kzalloc(dev, sizeof(*csd), GFP_KERNEL);
+ if (!csd) {
+ of_node_put(node);
+ dev_err(dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ notifier->subdevs[i++] = &csd->asd;
+
+ ret = camss_of_parse_node(dev, node, csd);
+ if (ret < 0) {
+ of_node_put(node);
+ return ret;
+ }
+
+ csd->asd.match.of.node = of_graph_get_remote_port_parent(node);
+ of_node_put(node);
+ if (!csd->asd.match.of.node) {
+ dev_warn(dev, "bad remote port parent\n");
+ return -EINVAL;
+ }
+
+ csd->asd.match_type = V4L2_ASYNC_MATCH_OF;
+ }
+
+ return notifier->num_subdevs;
+}
+
+static int camss_init_subdevices(struct camss *camss)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < camss->csiphy_num; i++) {
+ ret = msm_csiphy_subdev_init(&camss->csiphy[i], camss,
+ &csiphy_res[i], i);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to init csiphy[%d] sub-device\n", i);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < camss->csid_num; i++) {
+ ret = msm_csid_subdev_init(&camss->csid[i], camss,
+ &csid_res[i], i);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to init csid[%d] sub-device\n", i);
+ return ret;
+ }
+ }
+
+ ret = msm_ispif_subdev_init(&camss->ispif, camss, &ispif_res);
+ if (ret < 0) {
+ dev_err(camss->dev, "Failed to init ispif sub-device\n");
+ return ret;
+ }
+
+ camss->vfe_init.num_cids = 1;
+ camss->vfe_init.cid[0] = -1;
+ ret = msm_vfe_subdev_init(&camss->vfe, camss, &camss->vfe_init);
+ if (ret < 0) {
+ dev_err(camss->dev, "Fail to init vfe sub-device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int camss_register_entities(struct camss *camss)
+{
+ int i, j;
+ int ret;
+
+ for (i = 0; i < camss->csiphy_num; i++) {
+ ret = msm_csiphy_register_entities(&camss->csiphy[i],
+ &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to register csiphy[%d] entity\n", i);
+ goto err_reg_csiphy;
+ }
+ }
+
+ for (i = 0; i < camss->csiphy_num; i++) {
+ ret = msm_csid_register_entities(&camss->csid[i],
+ &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to register csid[%d] entity\n", i);
+ goto err_reg_csid;
+ }
+ }
+
+ ret = msm_ispif_register_entities(&camss->ispif, &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(camss->dev, "Fail to register ispif entities\n");
+ goto err_reg_ispif;
+ }
+
+ ret = msm_vfe_register_entities(&camss->vfe, &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(camss->dev, "Fail to register vfe entities\n");
+ goto err_reg_vfe;
+ }
+
+ for (i = 0; i < camss->csiphy_num; i++) {
+ for (j = 0; j < camss->csid_num; j++) {
+ ret = media_entity_create_link(
+ &camss->csiphy[i].subdev.entity,
+ MSM_CSIPHY_PAD_SRC,
+ &camss->csid[j].subdev.entity,
+ MSM_CSID_PAD_SINK,
+ 0);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Fail to link %s->%s entities\n",
+ camss->csiphy[i].subdev.entity.name,
+ camss->csid[j].subdev.entity.name);
+ goto err_link;
+ }
+ }
+ }
+
+ for (i = 0; i < camss->csid_num; i++) {
+ ret = media_entity_create_link(
+ &camss->csid[i].subdev.entity, MSM_CSID_PAD_SRC,
+ &camss->ispif.subdev.entity, MSM_ISPIF_PAD_SINK, 0);
+ if (ret < 0) {
+ dev_err(camss->dev, "Fail to link %s->%s entities\n",
+ camss->csid[i].subdev.entity.name,
+ camss->ispif.subdev.entity.name);
+ goto err_link;
+ }
+ }
+
+ ret = media_entity_create_link(
+ &camss->ispif.subdev.entity, MSM_ISPIF_PAD_SRC,
+ &camss->vfe.subdev.entity, MSM_VFE_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret < 0) {
+ dev_err(camss->dev, "Fail to link %s->%s entities\n",
+ camss->ispif.subdev.entity.name,
+ camss->vfe.subdev.entity.name);
+ goto err_link;
+ }
+
+ return 0;
+
+err_link:
+ msm_vfe_unregister_entities(&camss->vfe);
+err_reg_vfe:
+ msm_ispif_unregister_entities(&camss->ispif);
+err_reg_ispif:
+
+ i = camss->csid_num;
+err_reg_csid:
+ for (i--; i >= 0; i--) {
+ msm_csid_unregister_entities(&camss->csid[i]);
+ }
+
+ i = camss->csiphy_num;
+err_reg_csiphy:
+ for (i--; i >= 0; i--) {
+ msm_csiphy_unregister_entities(&camss->csiphy[i]);
+ }
+
+ return ret;
+}
+
+static void camss_unregister_entities(struct camss *camss)
+{
+ int i;
+
+ for (i = camss->csiphy_num - 1; i >= 0; i--)
+ msm_csiphy_unregister_entities(&camss->csiphy[i]);
+
+ for (i = camss->csid_num - 1; i >= 0; i--)
+ msm_csid_unregister_entities(&camss->csid[i]);
+
+ msm_ispif_unregister_entities(&camss->ispif);
+ msm_vfe_unregister_entities(&camss->vfe);
+}
+
+static int camss_subdev_notifier_bound(struct v4l2_async_notifier *async,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct media_entity *sensor = &subdev->entity;
+ struct camss *camss = container_of(async, struct camss, notifier);
+ struct camss_async_subdev *csd =
+ container_of(asd, struct camss_async_subdev, asd);
+ enum camss_csiphy id = csd->interface.id;
+ struct csiphy_device *csiphy = &camss->csiphy[id];
+ struct media_entity *input = &csiphy->subdev.entity;
+ unsigned int flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
+ unsigned int pad = MSM_CSIPHY_PAD_SINK;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < sensor->num_pads; i++) {
+ if (sensor->pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+ if (i == sensor->num_pads) {
+ dev_err(camss->dev, "%s: no source pad in external entity\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = media_entity_create_link(sensor, i, input, pad, flags);
+ if (ret < 0) {
+ dev_err(camss->dev, "Fail to link %s->%s entities\n",
+ sensor->name, input->name);
+ return ret;
+ }
+
+ csiphy->cfg.csi2 = &csd->interface.csi2;
+
+ return 0;
+}
+
+static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async)
+{
+ struct camss *camss = container_of(async, struct camss, notifier);
+
+ return v4l2_device_register_subdev_nodes(&camss->v4l2_dev);
+}
+
+static int camss_probe(struct platform_device *pdev)
+{
+ struct camss *camss;
+ int ret;
+
+ dev_dbg(&pdev->dev, "Enter\n");
+
+ ret = camss_alloc(&pdev->dev, &camss);
+ if (ret < 0)
+ return ret;
+
+ camss->dev = &pdev->dev;
+ platform_set_drvdata(pdev, camss);
+
+ ret = camss_of_parse_nodes(&pdev->dev, &camss->notifier);
+ if (ret < 0)
+ return ret;
+
+ ret = camss_init_subdevices(camss);
+ if (ret < 0)
+ return ret;
+
+ camss->media_dev.dev = camss->dev;
+ strlcpy(camss->media_dev.model, "QC MSM CAMSS",
+ sizeof(camss->media_dev.model));
+ camss->media_dev.driver_version = CAMSS_VERSION;
+ camss->media_dev.link_notify = camss_pipeline_link_notify;
+ ret = media_device_register(&camss->media_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "%s: Media device registration failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ camss->v4l2_dev.mdev = &camss->media_dev;
+ ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "%s: V4L2 device registration failed (%d)\n",
+ __func__, ret);
+ goto err_register_v4l2;
+ }
+
+ ret = camss_register_entities(camss);
+ if (ret < 0)
+ goto err_register_entities;
+
+ if (camss->notifier.num_subdevs) {
+ camss->notifier.bound = camss_subdev_notifier_bound;
+ camss->notifier.complete = camss_subdev_notifier_complete;
+
+ ret = v4l2_async_notifier_register(&camss->v4l2_dev,
+ &camss->notifier);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: V4L2 async notifier registration failed (%d)\n",
+ __func__, ret);
+ goto err_register_subdevs;
+ }
+ } else {
+ ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "%s: V4L2 subdev nodes registration failed (%d)\n",
+ __func__, ret);
+ goto err_register_subdevs;
+ }
+ }
+
+ dev_dbg(&pdev->dev, "camss driver registered successfully!\n");
+
+ return 0;
+
+err_register_subdevs:
+ camss_unregister_entities(camss);
+err_register_entities:
+ v4l2_device_unregister(&camss->v4l2_dev);
+err_register_v4l2:
+ media_device_unregister(&camss->media_dev);
+
+ return ret;
+}
+
+static int camss_remove(struct platform_device *pdev)
+{
+ struct camss *camss = platform_get_drvdata(pdev);
+
+ v4l2_async_notifier_unregister(&camss->notifier);
+ camss_unregister_entities(camss);
+ v4l2_device_unregister(&camss->v4l2_dev);
+ media_device_unregister(&camss->media_dev);
+
+ return 0;
+}
+
+static const struct of_device_id camss_dt_match[] = {
+ { .compatible = "qcom,msm-camss" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, camss_dt_match);
+
+static struct platform_driver qcom_camss_driver = {
+ .probe = camss_probe,
+ .remove = camss_remove,
+ .driver = {
+ .name = "qcom-camss",
+ .of_match_table = camss_dt_match,
+ },
+};
+
+module_platform_driver(qcom_camss_driver);
+
+MODULE_ALIAS("platform:qcom-camss");
+MODULE_DESCRIPTION("Qualcomm camera subsystem driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * camss.h
+ *
+ * Qualcomm MSM Camera Subsystem - Core
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef QC_MSM_CAMSS_H
+#define QC_MSM_CAMSS_H
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/media-device.h>
+#include <linux/device.h>
+
+#include "csid.h"
+#include "csiphy.h"
+#include "ispif.h"
+#include "vfe.h"
+
+#define CAMSS_VERSION KERNEL_VERSION(0, 1, 0)
+
+#define CAMSS_RES_MAX 15
+
+struct resources {
+ char *regulator[CAMSS_RES_MAX];
+ char *clock[CAMSS_RES_MAX];
+ s32 clock_rate[CAMSS_RES_MAX];
+ char *reg[CAMSS_RES_MAX];
+ char *interrupt[CAMSS_RES_MAX];
+};
+
+struct resources_ispif {
+ char *clock[CAMSS_RES_MAX];
+ u8 clock_for_reset[CAMSS_RES_MAX];
+ char *reg[CAMSS_RES_MAX];
+ char *interrupt;
+};
+
+struct camss {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_async_notifier notifier;
+ struct media_device media_dev;
+ struct device *dev;
+ int csiphy_num;
+ struct csiphy_device *csiphy;
+ int csid_num;
+ struct csid_device *csid;
+ struct ispif_device ispif;
+ struct vfe_device vfe;
+ struct vfe_init vfe_init;
+ struct device *iommu_dev;
+};
+
+enum camss_csiphy {
+ CAMSS_CSIPHY0 = 0,
+ CAMSS_CSIPHY1
+};
+
+struct camss_csiphy_lane {
+ u8 pos;
+ u8 pol;
+};
+
+struct camss_csiphy_lanes_cfg {
+ int num_data;
+ struct camss_csiphy_lane *data;
+ struct camss_csiphy_lane clk;
+};
+
+struct camss_csi2_cfg {
+ int settle_cnt;
+ struct camss_csiphy_lanes_cfg lanecfg;
+};
+
+struct camss_camera_interface {
+ enum camss_csiphy id;
+ struct camss_csi2_cfg csi2;
+};
+
+struct camss_async_subdev {
+ struct camss_camera_interface interface;
+ struct v4l2_async_subdev asd;
+};
+
+int msm_camss_pipeline_pm_use(struct media_entity *entity, int use);
+
+#endif /* QC_MSM_CAMSS_H */
--- /dev/null
+/*
+ * csid.c
+ *
+ * Qualcomm MSM Camera Subsystem - CSID Module
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "csid.h"
+#include "camss.h"
+
+#define MSM_CSID_NAME "msm_csid"
+
+#define CAMSS_CSID_HW_VERSION 0x0
+#define CAMSS_CSID_CORE_CTRL_0 0x004
+#define CAMSS_CSID_CORE_CTRL_1 0x008
+#define CAMSS_CSID_RST_CMD 0x00c
+#define CAMSS_CSID_CID_LUT_VC_n(n) (0x010 + 0x4 * (n))
+#define CAMSS_CSID_CID_n_CFG(n) (0x020 + 0x4 * (n))
+#define CAMSS_CSID_IRQ_CLEAR_CMD 0x060
+#define CAMSS_CSID_IRQ_MASK 0x064
+#define CAMSS_CSID_IRQ_STATUS 0x068
+#define CAMSS_CSID_TG_CTRL 0x0a0
+#define CAMSS_CSID_TG_VC_CFG 0x0a4
+#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff
+#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f
+#define CAMSS_CSID_TG_DT_n_CGG_0(n) (0x0ac + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_1(n) (0x0b0 + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_2(n) (0x0b4 + 0xc * (n))
+
+/*
+ * csid_isr - CSID module interrupt handler
+ * @irq: Interrupt line
+ * @dev: CSID device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t csid_isr(int irq, void *dev)
+{
+ struct csid_device *csid = dev;
+ u32 value;
+
+ value = readl(csid->base + CAMSS_CSID_IRQ_STATUS);
+ writel(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD);
+
+ if ((value >> 11) & 0x1)
+ complete(&csid->reset_complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csid_enable_clocks - Enable clocks for CSID module and
+ * set clock rates where needed
+ * @csid: CSID device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_enable_clocks(int nclocks, struct clk **clock, s32 *clock_rate)
+{
+ long clk_rate;
+ int ret;
+ int i;
+
+ for (i = 0; i < nclocks; i++) {
+ if (clock_rate[i]) {
+ clk_rate = clk_round_rate(clock[i], clock_rate[i]);
+ if (clk_rate < 0) {
+ pr_err("clock round rate failed\n");
+ ret = clk_rate;
+ goto error;
+ }
+ ret = clk_set_rate(clock[i], clk_rate);
+ if (ret < 0) {
+ pr_err("clock set rate failed\n");
+ goto error;
+ }
+ }
+ ret = clk_prepare_enable(clock[i]);
+ if (ret) {
+ pr_err("clock enable failed\n");
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ for (i--; i >= 0; i--)
+ clk_disable_unprepare(clock[i]);
+
+ return ret;
+}
+
+/*
+ * csid_disable_clocks - Disable clocks for CSID module
+ * @csid: CSID device
+ */
+static void csid_disable_clocks(int nclocks, struct clk **clock)
+{
+ int i;
+
+ for (i = nclocks - 1; i >= 0; i--)
+ clk_disable_unprepare(clock[i]);
+}
+
+/*
+ * csid_set_power - Power on/off CSID module
+ * @sd: CSID V4L2 subdevice
+ * @on: Requested power state
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+ int ret;
+
+ dev_err(csid->camss->dev, "%s: Enter, csid%d on = %d\n",
+ __func__, csid->id, on);
+
+ if (on) {
+ u32 hw_version;
+
+ ret = regulator_enable(csid->vdda);
+ if (ret < 0)
+ return ret;
+
+ ret = csid_enable_clocks(csid->nclocks, csid->clock,
+ csid->clock_rate);
+ if (ret < 0)
+ return ret;
+
+ enable_irq(csid->irq);
+
+ hw_version = readl(csid->base + CAMSS_CSID_HW_VERSION);
+ dev_err(csid->camss->dev, "CSID HW Version = 0x%08x\n", hw_version);
+ } else {
+ disable_irq(csid->irq);
+
+ csid_disable_clocks(csid->nclocks, csid->clock);
+
+ ret = regulator_disable(csid->vdda);
+ if (ret < 0)
+ return ret;
+ }
+
+ dev_err(csid->camss->dev, "%s: Exit, csid%d on = %d\n",
+ __func__, csid->id, on);
+
+ return 0;
+}
+
+#define DATA_TYPE_YUV422_8BIT 0x1e
+
+/*
+ * csid_get_data_type - map media but format to data type
+ * @fmt media bus format code
+ *
+ * Return data type code
+ */
+static u8 csid_get_data_type(u32 fmt)
+{
+ switch (fmt) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ return DATA_TYPE_YUV422_8BIT;
+ }
+
+ return 0;
+}
+
+#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1
+
+/*
+ * csid_get_decode_format - map media but format to decode format
+ * @fmt media bus format code
+ *
+ * Return decode format code
+ */
+static u8 csid_get_decode_format(u32 fmt)
+{
+ switch (fmt) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ return DECODE_FORMAT_UNCOMPRESSED_8_BIT;
+ }
+
+ return 0;
+}
+
+/*
+ * csid_set_stream - Enable/disable streaming on CSID module
+ * @sd: CSID V4L2 subdevice
+ * @enable: Requested streaming state
+ *
+ * Main configuration of CSID module is also done here.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+ struct csid_testgen_config *tg = &csid->testgen;
+
+ dev_err(csid->camss->dev, "%s: Enter, csid%d enable = %d\n",
+ __func__, csid->id, enable);
+
+ if (enable) {
+ u8 vc = 0; /* TODO: How to get this from sensor? */
+ u8 cid = vc * 4;
+ u8 dt, dt_shift, df;
+ u32 val;
+ int ret;
+
+ ret = v4l2_ctrl_handler_setup(&csid->ctrls);
+ if (ret < 0) {
+ dev_err(csid->camss->dev,
+ "could not sync v4l2 controls\n");
+ return ret;
+ }
+
+ if (!tg->enabled &&
+ !media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK])) {
+ return -ENOLINK;
+ }
+
+ /* Reset */
+ writel(0x7FFF, csid->base + CAMSS_CSID_RST_CMD);
+ wait_for_completion(&csid->reset_complete);
+
+ dt = csid_get_data_type(csid->fmt[MSM_CSID_PAD_SRC].code);
+
+ if (tg->enabled) {
+ /* Config Test Generator */
+ u32 num_bytes_per_line =
+ csid->fmt[MSM_CSID_PAD_SRC].width * 2;
+ u32 num_lines = csid->fmt[MSM_CSID_PAD_SRC].height;
+
+ /* 31:24 V blank, 23:13 H blank, 3:2 num of active DT */
+ /* 1:0 VC */
+ val = ((CAMSS_CSID_TG_VC_CFG_V_BLANKING & 0xff) << 24) |
+ ((CAMSS_CSID_TG_VC_CFG_H_BLANKING & 0x7ff) << 13);
+ writel(val, csid->base + CAMSS_CSID_TG_VC_CFG);
+
+ /* 28:16 bytes per lines, 12:0 num of lines */
+ val = ((num_bytes_per_line & 0x1FFF) << 16) |
+ (num_lines & 0x1FFF);
+ writel(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_0(0));
+
+ /* 5:0 data type */
+ val = dt;
+ writel(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_1(0));
+
+ /* 2:0 output random */
+ val = tg->payload_mode;
+ writel(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_2(0));
+ } else {
+ struct csid_phy_config *phy = &csid->phy;
+
+ val = phy->lane_cnt - 1;
+ val |= phy->lane_assign << 4;
+
+ writel(val, csid->base + CAMSS_CSID_CORE_CTRL_0);
+
+ val = phy->csiphy_id << 17;
+ val |= 0x9;
+
+ writel(val, csid->base + CAMSS_CSID_CORE_CTRL_1);
+ }
+
+ /* Config LUT */
+
+ dt_shift = (cid % 4) * 8;
+ df = csid_get_decode_format(csid->fmt[MSM_CSID_PAD_SINK].code);
+
+ val = readl(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
+ val &= ~(0xff << dt_shift);
+ val |= dt << dt_shift;
+ writel(val, csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
+
+ val = (df << 4) | 0x3;
+ writel(val, csid->base + CAMSS_CSID_CID_n_CFG(cid));
+
+ if (tg->enabled) {
+ val = 0x00a06437;
+ writel(val, csid->base + CAMSS_CSID_TG_CTRL);
+ }
+ } else {
+ if (tg->enabled) {
+ u32 val = 0x00a06436;
+ writel(val, csid->base + CAMSS_CSID_TG_CTRL);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * __csid_get_format - Get pointer to format structure
+ * @csid: CSID device
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad from which format is requested
+ * @which: TRY or ACTIVE format
+ *
+ * Return pointer to TRY or ACTIVE format structure
+ */
+static struct v4l2_mbus_framefmt *
+__csid_get_format(struct csid_device *csid,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&csid->subdev, cfg, pad);
+
+ return &csid->fmt[pad];
+}
+
+/*
+ * csid_get_format - Handle get format by pads subdev method
+ * @sd: CSID V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int csid_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csid_get_format(csid, cfg, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+/*
+ * csid_set_format - Handle set format by pads subdev method
+ * @sd: CSID V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * A format set on the sink pad is propagated unchanged to the source pad.
+ * Setting the source pad directly is only meaningful for the test
+ * generator (i.e. when no CSIPHY is linked to the sink).
+ *
+ * Return -EINVAL or zero on success
+ */
+static int csid_set_format(struct v4l2_subdev *sd,
+			   struct v4l2_subdev_pad_config *cfg,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct csid_device *csid = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *sink;
+	struct v4l2_mbus_framefmt *src;
+
+	if (fmt->pad == MSM_CSID_PAD_SINK) {
+		/* Store the sink format ... */
+		sink = __csid_get_format(csid, cfg, MSM_CSID_PAD_SINK,
+					 fmt->which);
+		if (!sink)
+			return -EINVAL;
+		*sink = fmt->format;
+
+		/* ... and mirror it onto the source pad. */
+		src = __csid_get_format(csid, cfg, MSM_CSID_PAD_SRC,
+					fmt->which);
+		if (!src)
+			return -EINVAL;
+		*src = fmt->format;
+
+		return 0;
+	}
+
+	src = __csid_get_format(csid, cfg, fmt->pad, fmt->which);
+	if (!src)
+		return -EINVAL;
+
+	if (media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK])) {
+		/* Linked to CSIPHY: the source pad always follows the sink. */
+		sink = __csid_get_format(csid, cfg, MSM_CSID_PAD_SINK,
+					 fmt->which);
+		if (!sink)
+			return -EINVAL;
+
+		fmt->format = *sink;
+		*src = fmt->format;
+	} else {
+		/*
+		 * Not linked to CSIPHY: allow setting the source pad for
+		 * test generator usage, accepting only YUV422.
+		 */
+		fmt->format.code = MEDIA_BUS_FMT_UYVY8_2X8;
+		*src = fmt->format;
+	}
+
+	return 0;
+}
+
+/* Menu entries for V4L2_CID_TEST_PATTERN; the control value (index)
+ * is mapped to a payload mode in csid_set_test_pattern(). */
+static const char * const csid_test_pattern_menu[] = {
+ "Disabled",
+ "Incrementing",
+ "Alternating 55/AA",
+ "All Zeros",
+ "All Ones",
+ "Random Data",
+};
+
+/*
+ * csid_set_test_pattern - Apply a V4L2_CID_TEST_PATTERN control value
+ * @csid: CSID device
+ * @value: menu index (0 = disabled, 1..5 select a payload pattern)
+ *
+ * Return 0 on success or -EBUSY when a CSIPHY is linked to the sink pad
+ */
+static int csid_set_test_pattern(struct csid_device *csid, s32 value)
+{
+	static const enum csid_payload_mode mode_by_value[] = {
+		CSID_PAYLOAD_MODE_INCREMENTING,
+		CSID_PAYLOAD_MODE_ALTERNATING_55_AA,
+		CSID_PAYLOAD_MODE_ALL_ZEROES,
+		CSID_PAYLOAD_MODE_ALL_ONES,
+		CSID_PAYLOAD_MODE_RANDOM,
+	};
+	struct csid_testgen_config *tg = &csid->testgen;
+
+	/* If CSID is linked to CSIPHY, do not allow to enable test generator */
+	if (value && media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
+		return -EBUSY;
+
+	tg->enabled = !!value;
+
+	/* Menu index 1..5 maps directly onto the pattern table. */
+	if (value >= 1 && value <= 5)
+		tg->payload_mode = mode_by_value[value - 1];
+
+	return 0;
+}
+
+/* Control handler callback; only V4L2_CID_TEST_PATTERN is supported. */
+static int csid_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct csid_device *csid = container_of(ctrl->handler,
+						struct csid_device, ctrls);
+
+	if (ctrl->id == V4L2_CID_TEST_PATTERN)
+		return csid_set_test_pattern(csid, ctrl->val);
+
+	/* Unknown control id. */
+	return -EINVAL;
+}
+
+/* Control ops table; made const — it is never modified and is passed to
+ * v4l2_ctrl_new_std_menu_items() which takes a const pointer. */
+static const struct v4l2_ctrl_ops csid_ctrl_ops = {
+	.s_ctrl = csid_s_ctrl,
+};
+
+/*
+ * msm_csid_subdev_init - Initialize CSID device structure and resources
+ * @csid: CSID device
+ * @camss: Camera sub-system structure
+ * @res: CSID module resources table
+ * @id: CSID module id
+ *
+ * Maps registers, requests the IRQ (left disabled until s_power), and
+ * acquires clocks and the vdda regulator via devm_* (no manual cleanup).
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_csid_subdev_init(struct csid_device *csid, struct camss *camss,
+			 struct resources *res, u8 id)
+{
+	struct device *dev = camss->dev;
+	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+	struct resource *r;
+	int i;
+	int ret;
+
+	csid->camss = camss;
+	csid->id = id;
+
+	/* Memory */
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
+	csid->base = devm_ioremap_resource(dev, r);
+	if (IS_ERR(csid->base)) {
+		dev_err(dev, "could not map memory\n");
+		return PTR_ERR(csid->base);
+	}
+
+	/* Interrupt */
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+					 res->interrupt[0]);
+	if (!r) {
+		/* r was previously dereferenced unchecked; a missing IRQ
+		 * resource was a NULL pointer dereference. The old
+		 * IS_ERR_VALUE() check on the u32 irq was ineffective. */
+		dev_err(dev, "missing IRQ resource\n");
+		return -ENXIO;
+	}
+	csid->irq = r->start;
+
+	ret = devm_request_irq(dev, csid->irq, csid_isr,
+			       IRQF_TRIGGER_RISING, dev_name(dev), csid);
+	if (ret < 0) {
+		dev_err(dev, "request_irq failed\n");
+		return ret;
+	}
+
+	/* Keep the line masked until csid_set_power() enables the module. */
+	disable_irq(csid->irq);
+
+	/* Clocks */
+
+	csid->nclocks = 0;
+	while (res->clock[csid->nclocks])
+		csid->nclocks++;
+
+	/* devm_kcalloc checks the n * size multiplication for overflow. */
+	csid->clock = devm_kcalloc(dev, csid->nclocks, sizeof(*csid->clock),
+				   GFP_KERNEL);
+	if (!csid->clock)
+		return -ENOMEM;
+
+	csid->clock_rate = devm_kcalloc(dev, csid->nclocks,
+					sizeof(*csid->clock_rate), GFP_KERNEL);
+	if (!csid->clock_rate)
+		return -ENOMEM;
+
+	for (i = 0; i < csid->nclocks; i++) {
+		csid->clock[i] = devm_clk_get(dev, res->clock[i]);
+		if (IS_ERR(csid->clock[i]))
+			return PTR_ERR(csid->clock[i]);
+		csid->clock_rate[i] = res->clock_rate[i];
+	}
+
+	/* Regulator */
+
+	csid->vdda = devm_regulator_get(dev, res->regulator[0]);
+	if (IS_ERR(csid->vdda)) {
+		dev_err(dev, "could not get regulator\n");
+		return PTR_ERR(csid->vdda);
+	}
+
+	/* Signalled from csid_isr when a software reset completes. */
+	init_completion(&csid->reset_complete);
+
+	return 0;
+}
+
+/*
+ * csid_get_lane_assign - Calculate CSI2 lane assign configuration parameter
+ * @lanecfg: CSI2 lane configuration
+ *
+ * Packs each data lane position into a 4-bit nibble of the result.
+ *
+ * Return lane assign
+ */
+static u32 csid_get_lane_assign(struct camss_csiphy_lanes_cfg *lanecfg)
+{
+	u32 assign = 0;
+	int idx;
+
+	for (idx = lanecfg->num_data - 1; idx >= 0; idx--)
+		assign |= lanecfg->data[idx].pos << (idx * 4);
+
+	return assign;
+}
+
+/*
+ * csid_link_setup - Setup CSID connections
+ * @entity: Pointer to media entity structure
+ * @local: Pointer to local pad
+ * @remote: Pointer to remote pad
+ * @flags: Link flags
+ *
+ * Return 0 on success
+ */
+static int csid_link_setup(struct media_entity *entity,
+			   const struct media_pad *local,
+			   const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *local_sd;
+	struct v4l2_subdev *remote_sd;
+	struct csid_device *csid;
+	struct csiphy_device *csiphy;
+	struct camss_csiphy_lanes_cfg *lanecfg;
+
+	/* Only an enabled link into our sink pad needs handling. */
+	if (!(local->flags & MEDIA_PAD_FL_SINK) ||
+	    !(flags & MEDIA_LNK_FL_ENABLED))
+		return 0;
+
+	local_sd = container_of(entity, struct v4l2_subdev, entity);
+	csid = v4l2_get_subdevdata(local_sd);
+
+	/* The test generator and a CSIPHY source are mutually exclusive. */
+	if (csid->testgen_mode->cur.val != 0)
+		return -EBUSY;
+
+	remote_sd = container_of(remote->entity, struct v4l2_subdev, entity);
+	csiphy = v4l2_get_subdevdata(remote_sd);
+
+	/* Refuse the link while no sensor is attached to the CSIPHY. */
+	if (!csiphy->cfg.csi2)
+		return -EPERM;
+
+	/* Capture the PHY parameters needed to program the CSID. */
+	csid->phy.csiphy_id = csiphy->id;
+	lanecfg = &csiphy->cfg.csi2->lanecfg;
+	csid->phy.lane_cnt = lanecfg->num_data;
+	csid->phy.lane_assign = csid_get_lane_assign(lanecfg);
+
+	return 0;
+}
+
+/* Core ops: power management. */
+static const struct v4l2_subdev_core_ops csid_core_ops = {
+ .s_power = csid_set_power,
+};
+
+/* Video ops: stream on/off. */
+static const struct v4l2_subdev_video_ops csid_video_ops = {
+ .s_stream = csid_set_stream,
+};
+
+/* Pad ops: format negotiation on sink/source pads. */
+static const struct v4l2_subdev_pad_ops csid_pad_ops = {
+ .get_fmt = csid_get_format,
+ .set_fmt = csid_set_format,
+};
+
+static const struct v4l2_subdev_ops csid_v4l2_ops = {
+ .core = &csid_core_ops,
+ .video = &csid_video_ops,
+ .pad = &csid_pad_ops,
+};
+
+/* No internal ops needed yet; the empty table keeps the pointer valid. */
+static const struct v4l2_subdev_internal_ops csid_v4l2_internal_ops;
+
+static const struct media_entity_operations csid_media_ops = {
+ .link_setup = csid_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * msm_csid_register_entities - Register subdev node for CSID module
+ * @csid: CSID device
+ * @v4l2_dev: V4L2 device
+ *
+ * Initializes the subdev, its test pattern control and media pads, then
+ * registers it with the V4L2 device. On failure all partially acquired
+ * resources are released.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_csid_register_entities(struct csid_device *csid,
+			       struct v4l2_device *v4l2_dev)
+{
+	struct v4l2_subdev *sd = &csid->subdev;
+	struct media_pad *pads = csid->pads;
+	int ret;
+
+	v4l2_subdev_init(sd, &csid_v4l2_ops);
+	sd->internal_ops = &csid_v4l2_internal_ops;
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
+		 MSM_CSID_NAME, csid->id);
+	v4l2_set_subdevdata(sd, csid);
+
+	/* Single control: the test pattern generator menu. */
+	v4l2_ctrl_handler_init(&csid->ctrls, 1);
+	csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls,
+				&csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
+				ARRAY_SIZE(csid_test_pattern_menu) - 1, 0, 0,
+				csid_test_pattern_menu);
+
+	if (csid->ctrls.error) {
+		dev_err(csid->camss->dev, "failed to init ctrl: %d\n",
+			csid->ctrls.error);
+		ret = csid->ctrls.error;
+		goto free_ctrl;
+	}
+
+	csid->subdev.ctrl_handler = &csid->ctrls;
+
+	pads[MSM_CSID_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[MSM_CSID_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+
+	sd->entity.ops = &csid_media_ops;
+	ret = media_entity_init(&sd->entity, MSM_CSID_PADS_NUM, pads, 0);
+	if (ret < 0) {
+		/* Fix: these log messages were missing the trailing '\n'. */
+		dev_err(csid->camss->dev, "failed to init media entity\n");
+		goto free_ctrl;
+	}
+
+	ret = v4l2_device_register_subdev(v4l2_dev, sd);
+	if (ret < 0) {
+		dev_err(csid->camss->dev, "failed to register subdev\n");
+		goto media_cleanup;
+	}
+
+	return 0;
+
+media_cleanup:
+	media_entity_cleanup(&sd->entity);
+free_ctrl:
+	v4l2_ctrl_handler_free(&csid->ctrls);
+
+	return ret;
+}
+
+/*
+ * msm_csid_unregister_entities - Unregister CSID module subdev node
+ * @csid: CSID device
+ *
+ * Counterpart of msm_csid_register_entities(); releases the subdev and
+ * the control handler created there.
+ */
+void msm_csid_unregister_entities(struct csid_device *csid)
+{
+ v4l2_device_unregister_subdev(&csid->subdev);
+ v4l2_ctrl_handler_free(&csid->ctrls);
+}
--- /dev/null
+/*
+ * csid.h
+ *
+ * Qualcomm MSM Camera Subsystem - CSID Module
+ *
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef QC_MSM_CAMSS_CSID_H
+#define QC_MSM_CAMSS_CSID_H
+
+#include <linux/clk.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#define MSM_CSID_PAD_SINK 0
+#define MSM_CSID_PAD_SRC 1
+#define MSM_CSID_PADS_NUM 2
+
+/* Payload patterns for the CSID test generator; values 0-4 correspond to
+ * test pattern menu entries 1-5 (see csid_set_test_pattern() in csid.c). */
+enum csid_payload_mode {
+ CSID_PAYLOAD_MODE_INCREMENTING = 0,
+ CSID_PAYLOAD_MODE_ALTERNATING_55_AA = 1,
+ CSID_PAYLOAD_MODE_ALL_ZEROES = 2,
+ CSID_PAYLOAD_MODE_ALL_ONES = 3,
+ CSID_PAYLOAD_MODE_RANDOM = 4,
+ CSID_PAYLOAD_MODE_USER_SPECIFIED = 5,
+};
+
+/* Test generator state, programmed via the V4L2 test pattern control. */
+struct csid_testgen_config {
+ u8 enabled;
+ enum csid_payload_mode payload_mode;
+};
+
+/* CSI2 PHY parameters captured when a CSIPHY->CSID link is enabled. */
+struct csid_phy_config {
+ u8 csiphy_id;
+ u8 lane_cnt;
+ u32 lane_assign;
+};
+
+struct camss;
+
+/* State of one CSID hardware instance. Resources (base, irq, clocks,
+ * vdda) are acquired in msm_csid_subdev_init(); the subdev, pads and
+ * controls are set up in msm_csid_register_entities(). */
+struct csid_device {
+ u8 id;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[MSM_CSID_PADS_NUM];
+ struct camss *camss;
+ void __iomem *base;
+ u32 irq;
+ struct clk **clock;
+ s32 *clock_rate;
+ int nclocks;
+ struct regulator *vdda;
+ struct completion reset_complete;
+ struct csid_testgen_config testgen;
+ struct csid_phy_config phy;
+ struct v4l2_mbus_framefmt fmt[MSM_CSID_PADS_NUM];
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *testgen_mode;
+};
+
+struct resources;
+
+int msm_csid_subdev_init(struct csid_device *csid, struct camss *camss,
+ struct resources *res, u8 id);
+
+int msm_csid_register_entities(struct csid_device *csid,
+ struct v4l2_device *v4l2_dev);
+
+void msm_csid_unregister_entities(struct csid_device *csid);
+
+#endif /* QC_MSM_CAMSS_CSID_H */
--- /dev/null
+/*
+ * csiphy.c
+ *
+ * Qualcomm MSM Camera Subsystem - CSIPHY Module
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "csiphy.h"
+#include "camss.h"
+
+#define MSM_CSIPHY_NAME "msm_csiphy"
+
+#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
+#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
+#define CAMSS_CSI_PHY_LNn_MISC1(n) (0x028 + 0x40 * (n))
+#define CAMSS_CSI_PHY_LNn_TEST_IMP(n) (0x01c + 0x40 * (n))
+#define CAMSS_CSI_PHY_GLBL_RESET 0x140
+#define CAMSS_CSI_PHY_GLBL_PWR_CFG 0x144
+#define CAMSS_CSI_PHY_GLBL_IRQ_CMD 0x164
+#define CAMSS_CSI_PHY_HW_VERSION 0x188
+#define CAMSS_CSI_PHY_INTERRUPT_STATUSn(n) (0x18c + 0x4 * (n))
+#define CAMSS_CSI_PHY_INTERRUPT_MASKn(n) (0x1ac + 0x4 * (n))
+#define CAMSS_CSI_PHY_INTERRUPT_CLEARn(n) (0x1cc + 0x4 * (n))
+#define CAMSS_CSI_PHY_GLBL_T_INIT_CFG0 0x1ec
+#define CAMSS_CSI_PHY_T_WAKEUP_CFG0 0x1f4
+
+/*
+ * csiphy_isr - CSIPHY module interrupt handler
+ * @irq: Interrupt line
+ * @dev: CSIPHY device
+ *
+ * Reads and acknowledges all eight per-lane interrupt status registers.
+ * NOTE(review): the GLBL_IRQ_CMD pulse inside the loop presumably latches
+ * each clear — confirm against the hardware manual. No event is
+ * propagated beyond acknowledging the interrupt.
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t csiphy_isr(int irq, void *dev)
+{
+ struct csiphy_device *csiphy = dev;
+ u8 val[8];
+ u8 i;
+
+ for (i = 0; i < 8; i++) {
+ val[i] = readl_relaxed(csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_STATUSn(i));
+ writel_relaxed(val[i], csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
+ /* Pulse the global IRQ command, then clear the clear register. */
+ writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
+ writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
+ writel_relaxed(0x0, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csiphy_reset - Perform software reset on CSIPHY module
+ * @csiphy: CSIPHY device
+ *
+ * Asserts GLBL_RESET, holds it for 5-8 ms, then deasserts it.
+ */
+static void csiphy_reset(struct csiphy_device *csiphy)
+{
+ writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+ usleep_range(5000, 8000);
+ writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+}
+
+/*
+ * csiphy_enable_clocks - Enable clocks for CSIPHY module and
+ * set clock rates where needed
+ * @csiphy: CSIPHY device
+ *
+ * On failure every clock already enabled is disabled again.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csiphy_enable_clocks(struct csiphy_device *csiphy)
+{
+	int idx;
+	int ret;
+
+	for (idx = 0; idx < csiphy->nclocks; idx++) {
+		struct clk *clk = csiphy->clock[idx];
+
+		if (csiphy->clock_rate[idx]) {
+			long rounded = clk_round_rate(clk,
+						      csiphy->clock_rate[idx]);
+
+			if (rounded < 0) {
+				dev_err(csiphy->camss->dev, "round failed\n");
+				ret = rounded;
+				goto err_unwind;
+			}
+
+			ret = clk_set_rate(clk, rounded);
+			if (ret < 0) {
+				dev_err(csiphy->camss->dev, "set rate failed\n");
+				goto err_unwind;
+			}
+		}
+
+		ret = clk_prepare_enable(clk);
+		if (ret) {
+			dev_err(csiphy->camss->dev, "clk enable failed\n");
+			goto err_unwind;
+		}
+	}
+
+	return 0;
+
+err_unwind:
+	while (--idx >= 0)
+		clk_disable_unprepare(csiphy->clock[idx]);
+
+	return ret;
+}
+
+/*
+ * csiphy_disable_clocks - Disable clocks for CSIPHY module
+ * @csiphy: CSIPHY device
+ *
+ * Undoes clk_prepare_enable() in reverse order.
+ */
+static void csiphy_disable_clocks(struct csiphy_device *csiphy)
+{
+	int idx = csiphy->nclocks;
+
+	while (idx-- > 0)
+		clk_disable_unprepare(csiphy->clock[idx]);
+}
+
+/*
+ * csiphy_set_power - Power on/off CSIPHY module
+ * @sd: CSIPHY V4L2 subdevice
+ * @on: Requested power state
+ *
+ * On power-up: enables clocks and the IRQ, resets the PHY and reads back
+ * the hardware version. On power-down the reverse.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csiphy_set_power(struct v4l2_subdev *sd, int on)
+{
+	struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
+	int ret;
+
+	/* Fix: trace/info messages were logged with dev_err; use dev_dbg. */
+	dev_dbg(csiphy->camss->dev, "%s: Enter, csiphy%d on = %d\n",
+		__func__, csiphy->id, on);
+
+	if (on) {
+		u8 hw_version;
+
+		ret = csiphy_enable_clocks(csiphy);
+		if (ret < 0)
+			return ret;
+
+		enable_irq(csiphy->irq);
+
+		csiphy_reset(csiphy);
+
+		hw_version = readl(csiphy->base + CAMSS_CSI_PHY_HW_VERSION);
+		dev_dbg(csiphy->camss->dev, "CSIPHY HW Version = 0x%02x\n",
+			hw_version);
+	} else {
+		disable_irq(csiphy->irq);
+
+		csiphy_disable_clocks(csiphy);
+	}
+
+	dev_dbg(csiphy->camss->dev, "%s: Exit csiphy%d on = %d\n",
+		__func__, csiphy->id, on);
+
+	return 0;
+}
+
+/*
+ * csiphy_get_lane_mask - Calculate CSI2 lane mask configuration parameter
+ * @lane_cfg: CSI2 lane configuration
+ *
+ * Sets one bit per lane position, clock lane included.
+ *
+ * Return lane mask
+ */
+static int csiphy_get_lane_mask(struct camss_csiphy_lanes_cfg *lane_cfg)
+{
+	u16 mask = 1 << lane_cfg->clk.pos;
+	int n;
+
+	for (n = 0; n < lane_cfg->num_data; n++)
+		mask |= 1 << lane_cfg->data[n].pos;
+
+	return mask;
+}
+
+/*
+ * csiphy_set_stream - Enable/disable streaming on CSIPHY module
+ * @sd: CSIPHY V4L2 subdevice
+ * @enable: Requested streaming state
+ *
+ * Main configuration of CSIPHY module is also done here: clk mux
+ * selection, global power configuration and per-lane setup.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csiphy_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
+	struct csiphy_config *cfg = &csiphy->cfg;
+	u16 lane_mask = csiphy_get_lane_mask(&cfg->csi2->lanecfg);
+	u8 i;
+	/* Fix: 'val' was u8 — readl() of the clk mux register was truncated
+	 * to 8 bits, so the read-modify-write below zeroed the register's
+	 * upper bits. Use the full 32-bit value. */
+	u32 val;
+
+	dev_dbg(csiphy->camss->dev, "%s: Enter, csiphy%d enable = %d\n",
+		__func__, csiphy->id, enable);
+
+	if (enable) {
+		/* Select which CSID the PHY clock is routed to. */
+		val = readl(csiphy->base_clk_mux);
+		if (cfg->combo_mode && (lane_mask & 0x18) == 0x18) {
+			val &= ~0xf0;
+			val |= cfg->csid_id << 4;
+		} else {
+			val &= ~0xf;
+			val |= cfg->csid_id;
+		}
+		writel_relaxed(val, csiphy->base_clk_mux);
+
+		writel_relaxed(0x1, csiphy->base +
+			       CAMSS_CSI_PHY_GLBL_T_INIT_CFG0);
+		writel_relaxed(0x1, csiphy->base +
+			       CAMSS_CSI_PHY_T_WAKEUP_CFG0);
+
+		val = 0x1;
+		val |= lane_mask << 1;
+		writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
+
+		val = cfg->combo_mode << 4;
+		writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+
+		/* Configure and unmask interrupts for each active lane. */
+		lane_mask &= 0x1f;
+		i = 0;
+		while (lane_mask & 0x1f) {
+			if (!(lane_mask & 0x1)) {
+				i++;
+				lane_mask >>= 1;
+				continue;
+			}
+
+			writel_relaxed(0x10, csiphy->base +
+				       CAMSS_CSI_PHY_LNn_CFG2(i));
+			writel_relaxed(cfg->csi2->settle_cnt, csiphy->base +
+				       CAMSS_CSI_PHY_LNn_CFG3(i));
+
+			writel_relaxed(0x3f, csiphy->base +
+				       CAMSS_CSI_PHY_INTERRUPT_MASKn(i));
+			writel_relaxed(0x3f, csiphy->base +
+				       CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
+
+			i++;
+			lane_mask >>= 1;
+		}
+	} else {
+		/* Tear down per-lane configuration. */
+		i = 0;
+		while (lane_mask) {
+			if (lane_mask & 0x1) {
+				writel_relaxed(0x0, csiphy->base +
+					       CAMSS_CSI_PHY_LNn_CFG2(i));
+				writel_relaxed(0x0, csiphy->base +
+					       CAMSS_CSI_PHY_LNn_MISC1(i));
+				writel_relaxed(0x0, csiphy->base +
+					       CAMSS_CSI_PHY_LNn_TEST_IMP(i));
+			}
+
+			lane_mask >>= 1;
+			i++;
+		}
+
+		writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_LNn_CFG2(4));
+		writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
+	}
+
+	return 0;
+}
+
+/*
+ * __csiphy_get_format - Get pointer to format structure
+ * @csiphy: CSIPHY device
+ * @cfg: V4L2 subdev pad configuration
+ * @pad: pad from which format is requested
+ * @which: TRY or ACTIVE format
+ *
+ * TRY formats live in the pad configuration, ACTIVE ones in the device.
+ *
+ * Return pointer to TRY or ACTIVE format structure
+ */
+static struct v4l2_mbus_framefmt *
+__csiphy_get_format(struct csiphy_device *csiphy,
+		    struct v4l2_subdev_pad_config *cfg,
+		    unsigned int pad,
+		    enum v4l2_subdev_format_whence which)
+{
+	return (which == V4L2_SUBDEV_FORMAT_TRY) ?
+		v4l2_subdev_get_try_format(&csiphy->subdev, cfg, pad) :
+		&csiphy->fmt[pad];
+}
+
+/*
+ * csiphy_get_format - Handle get format by pads subdev method
+ * @sd: CSIPHY V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return -EINVAL or zero on success
+ */
+static int csiphy_get_format(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_format *fmt)
+{
+	struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *cur;
+
+	cur = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which);
+	if (!cur)
+		return -EINVAL;
+
+	fmt->format = *cur;
+	return 0;
+}
+
+/*
+ * csiphy_set_format - Handle set format by pads subdev method
+ * @sd: CSIPHY V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * A format set on the sink pad is propagated unchanged to the source
+ * pad; the source pad itself is read-only and always mirrors the sink.
+ *
+ * Return -EINVAL or zero on success
+ */
+static int csiphy_set_format(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_format *fmt)
+{
+	struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *cur;
+
+	cur = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which);
+	if (!cur)
+		return -EINVAL;
+
+	if (fmt->pad == MSM_CSIPHY_PAD_SINK) {
+		struct v4l2_mbus_framefmt *src;
+
+		*cur = fmt->format;
+
+		/* Mirror the new sink format onto the source pad. */
+		src = __csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SRC,
+					  fmt->which);
+		if (!src)
+			return -EINVAL;
+		*src = fmt->format;
+	} else {
+		/* Source pad: report the stored format back unchanged. */
+		fmt->format = *cur;
+	}
+
+	return 0;
+}
+
+/*
+ * msm_csiphy_subdev_init - Initialize CSIPHY device structure and resources
+ * @csiphy: CSIPHY device
+ * @camss: Camera sub-system structure
+ * @res: CSIPHY module resources table
+ * @id: CSIPHY module id
+ *
+ * Maps the PHY and clk mux register regions, requests the IRQ (left
+ * disabled until s_power) and acquires clocks via devm_*.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_csiphy_subdev_init(struct csiphy_device *csiphy, struct camss *camss,
+			   struct resources *res, u8 id)
+{
+	struct device *dev = camss->dev;
+	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+	struct resource *r;
+	int i;
+	int ret;
+
+	csiphy->camss = camss;
+	csiphy->id = id;
+	csiphy->cfg.combo_mode = 0;
+
+	/* Memory */
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
+	csiphy->base = devm_ioremap_resource(dev, r);
+	if (IS_ERR(csiphy->base)) {
+		dev_err(dev, "could not map memory\n");
+		return PTR_ERR(csiphy->base);
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[1]);
+	csiphy->base_clk_mux = devm_ioremap_resource(dev, r);
+	if (IS_ERR(csiphy->base_clk_mux)) {
+		dev_err(dev, "could not map memory\n");
+		return PTR_ERR(csiphy->base_clk_mux);
+	}
+
+	/* Interrupt */
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+					 res->interrupt[0]);
+	if (!r) {
+		/* Fix: r was dereferenced unchecked; the IS_ERR_VALUE()
+		 * check on the u32 irq could never catch a missing
+		 * resource. */
+		dev_err(dev, "missing IRQ resource\n");
+		return -ENXIO;
+	}
+	csiphy->irq = r->start;
+
+	ret = devm_request_irq(dev, csiphy->irq, csiphy_isr,
+			       IRQF_TRIGGER_RISING, dev_name(dev), csiphy);
+	if (ret < 0) {
+		dev_err(dev, "request_irq failed\n");
+		return ret;
+	}
+
+	/* Keep the line masked until csiphy_set_power() enables it. */
+	disable_irq(csiphy->irq);
+
+	/* Clocks */
+
+	csiphy->nclocks = 0;
+	while (res->clock[csiphy->nclocks])
+		csiphy->nclocks++;
+
+	/* devm_kcalloc checks the n * size multiplication for overflow. */
+	csiphy->clock = devm_kcalloc(dev, csiphy->nclocks,
+				     sizeof(*csiphy->clock), GFP_KERNEL);
+	if (!csiphy->clock)
+		return -ENOMEM;
+
+	csiphy->clock_rate = devm_kcalloc(dev, csiphy->nclocks,
+					  sizeof(*csiphy->clock_rate),
+					  GFP_KERNEL);
+	if (!csiphy->clock_rate)
+		return -ENOMEM;
+
+	for (i = 0; i < csiphy->nclocks; i++) {
+		csiphy->clock[i] = devm_clk_get(dev, res->clock[i]);
+		if (IS_ERR(csiphy->clock[i]))
+			return PTR_ERR(csiphy->clock[i]);
+		csiphy->clock_rate[i] = res->clock_rate[i];
+	}
+
+	return 0;
+}
+
+/*
+ * csiphy_link_setup - Setup CSIPHY connections
+ * @entity: Pointer to media entity structure
+ * @local: Pointer to local pad
+ * @remote: Pointer to remote pad
+ * @flags: Link flags
+ *
+ * Return 0 on success
+ */
+static int csiphy_link_setup(struct media_entity *entity,
+			     const struct media_pad *local,
+			     const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *local_sd;
+	struct v4l2_subdev *remote_sd;
+	struct csiphy_device *csiphy;
+	struct csid_device *csid;
+
+	/* Only an enabled link from our source pad is of interest. */
+	if (!(local->flags & MEDIA_PAD_FL_SOURCE) ||
+	    !(flags & MEDIA_LNK_FL_ENABLED))
+		return 0;
+
+	local_sd = container_of(entity, struct v4l2_subdev, entity);
+	csiphy = v4l2_get_subdevdata(local_sd);
+
+	remote_sd = container_of(remote->entity, struct v4l2_subdev, entity);
+	csid = v4l2_get_subdevdata(remote_sd);
+
+	/* Remember which CSID this PHY feeds (used for clk mux setup). */
+	csiphy->cfg.csid_id = csid->id;
+
+	return 0;
+}
+
+/* Core ops: power management. */
+static const struct v4l2_subdev_core_ops csiphy_core_ops = {
+ .s_power = csiphy_set_power,
+};
+
+/* Video ops: stream on/off (performs the main PHY configuration). */
+static const struct v4l2_subdev_video_ops csiphy_video_ops = {
+ .s_stream = csiphy_set_stream,
+};
+
+/* Pad ops: format negotiation on sink/source pads. */
+static const struct v4l2_subdev_pad_ops csiphy_pad_ops = {
+ .get_fmt = csiphy_get_format,
+ .set_fmt = csiphy_set_format,
+};
+
+static const struct v4l2_subdev_ops csiphy_v4l2_ops = {
+ .core = &csiphy_core_ops,
+ .video = &csiphy_video_ops,
+ .pad = &csiphy_pad_ops,
+};
+
+/* No internal ops needed yet; the empty table keeps the pointer valid. */
+static const struct v4l2_subdev_internal_ops csiphy_v4l2_internal_ops;
+
+static const struct media_entity_operations csiphy_media_ops = {
+ .link_setup = csiphy_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * msm_csiphy_register_entities - Register subdev node for CSIPHY module
+ * @csiphy: CSIPHY device
+ * @v4l2_dev: V4L2 device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_csiphy_register_entities(struct csiphy_device *csiphy,
+				 struct v4l2_device *v4l2_dev)
+{
+	struct v4l2_subdev *sd = &csiphy->subdev;
+	struct media_pad *pads = csiphy->pads;
+	int ret;
+
+	v4l2_subdev_init(sd, &csiphy_v4l2_ops);
+	sd->internal_ops = &csiphy_v4l2_internal_ops;
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
+		 MSM_CSIPHY_NAME, csiphy->id);
+	v4l2_set_subdevdata(sd, csiphy);
+
+	pads[MSM_CSIPHY_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[MSM_CSIPHY_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+
+	sd->entity.ops = &csiphy_media_ops;
+	ret = media_entity_init(&sd->entity, MSM_CSIPHY_PADS_NUM, pads, 0);
+	if (ret < 0) {
+		/* Fix: was bare pr_err without device context or trailing
+		 * newline; use dev_err to match the CSID module. */
+		dev_err(csiphy->camss->dev, "failed to init media entity\n");
+		return ret;
+	}
+
+	ret = v4l2_device_register_subdev(v4l2_dev, sd);
+	if (ret < 0) {
+		dev_err(csiphy->camss->dev, "failed to register subdev\n");
+		media_entity_cleanup(&sd->entity);
+	}
+
+	return ret;
+}
+
+/*
+ * msm_csiphy_unregister_entities - Unregister CSIPHY module subdev node
+ * @csiphy: CSIPHY device
+ *
+ * Counterpart of msm_csiphy_register_entities().
+ */
+void msm_csiphy_unregister_entities(struct csiphy_device *csiphy)
+{
+ v4l2_device_unregister_subdev(&csiphy->subdev);
+}
--- /dev/null
+/*
+ * csiphy.h
+ *
+ * Qualcomm MSM Camera Subsystem - CSIPHY Module
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef QC_MSM_CAMSS_CSIPHY_H
+#define QC_MSM_CAMSS_CSIPHY_H
+
+#include <linux/clk.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#define MSM_CSIPHY_PAD_SINK 0
+#define MSM_CSIPHY_PAD_SRC 1
+#define MSM_CSIPHY_PADS_NUM 2
+
+struct camss_csi2_cfg;
+
+/* Runtime configuration: csid_id is set in csiphy_link_setup(); csi2 is
+ * the lane configuration of the linked sensor (NULL when none). */
+struct csiphy_config {
+ u8 combo_mode;
+ u32 csid_id;
+ struct camss_csi2_cfg *csi2;
+};
+
+struct camss;
+
+/* State of one CSIPHY hardware instance. Resources are acquired in
+ * msm_csiphy_subdev_init(); base_clk_mux selects the CSID the PHY clock
+ * is routed to (programmed in csiphy_set_stream()). */
+struct csiphy_device {
+ u8 id;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[MSM_CSIPHY_PADS_NUM];
+ struct camss *camss;
+ void __iomem *base;
+ void __iomem *base_clk_mux;
+ u32 irq;
+ struct clk **clock;
+ s32 *clock_rate;
+ int nclocks;
+ struct csiphy_config cfg;
+ struct v4l2_mbus_framefmt fmt[MSM_CSIPHY_PADS_NUM];
+};
+
+struct resources;
+
+int msm_csiphy_subdev_init(struct csiphy_device *csiphy, struct camss *camss,
+ struct resources *res, u8 id);
+
+int msm_csiphy_register_entities(struct csiphy_device *csiphy,
+ struct v4l2_device *v4l2_dev);
+
+void msm_csiphy_unregister_entities(struct csiphy_device *csiphy);
+
+#endif /* QC_MSM_CAMSS_CSIPHY_H */
--- /dev/null
+/*
+ * ispif.c
+ *
+ * Qualcomm MSM Camera Subsystem - ISPIF Module
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "ispif.h"
+#include "camss.h"
+
+#define MSM_ISPIF_NAME "msm_ispif"
+
+#define ISPIF_RST_CMD_0 0x008
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x01c
+#define ISPIF_VFE_m_CTRL_0(m) (0x200 + 0x200 * (m))
+#define ISPIF_VFE_m_CTRL_0_PIX0_LINE_BUF_EN (1 << 6)
+#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_MASK_0_ENABLE 0x0a493249
+#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x20c + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_MASK_1_ENABLE 0x02493249
+#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_MASK_2_ENABLE 0x00001249
+#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x21c + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x220 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x224 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x230 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x234 + 0x200 * (m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x238 + 0x200 * (m))
+#define ISPIF_VFE_m_INTF_INPUT_SEL(m) (0x244 + 0x200 * (m))
+#define ISPIF_VFE_m_INTF_CMD_0(m) (0x248 + 0x200 * (m))
+#define ISPIF_VFE_m_INTF_CMD_1(m) (0x24c + 0x200 * (m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) (0x254 + 0x200 * (m) + 0x4 * (n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x264 + 0x200 * (m) + 0x4 * (n))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) (0x2c0 + 0x200 * (m) + 0x4 * (n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x2d0 + 0x200 * (m) + 0x4 * (n))
+
+#define CSI_RDI_CLK_MUX_SEL 0x008
+
+#define ISPIF_TIMEOUT_SLEEP_US 1000
+#define ISPIF_TIMEOUT_ALL_US 1000000
+
+/* ISPIF input interface selectors (PIX = pixel path; RDI presumably
+ * raw-dump interface — confirm against hardware documentation). */
+enum ispif_intf {
+ PIX0,
+ RDI0,
+ PIX1,
+ RDI1,
+ RDI2
+};
+
+/* Commands written to the ISPIF_VFE_m_INTF_CMD_* registers. */
+enum ispif_intf_cmd {
+ CMD_DISABLE_FRAME_BOUNDARY = 0x0,
+ CMD_ENABLE_FRAME_BOUNDARY = 0x1,
+ CMD_DISABLE_IMMEDIATELY = 0x2,
+ CMD_ALL_DISABLE_IMMEDIATELY = 0xaaaaaaaa,
+ CMD_ALL_NO_CHANGE = 0xffffffff,
+};
+
+/*
+ * ispif_isr - ISPIF module interrupt handler
+ * @irq: Interrupt line
+ * @dev: ISPIF device
+ *
+ * Reads and clears all three IRQ status registers of VFE0, latches the
+ * clear via the global clear command, and completes the reset completion
+ * when the reset-done bit is set.
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t ispif_isr(int irq, void *dev)
+{
+ struct ispif_device *ispif = dev;
+ u32 value0, value1, value2;
+
+ value0 = readl(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(0));
+ value1 = readl(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(0));
+ value2 = readl(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(0));
+
+ writel(value0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(0));
+ writel(value1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(0));
+ writel(value2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(0));
+
+ wmb();
+ writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD);
+ wmb();
+
+ /* Bit 27 of status 0 signals reset done (waited on in ispif_reset()). */
+ if ((value0 >> 27) & 0x1)
+ complete(&ispif->reset_complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ispif_enable_clocks - Enable a subset of the ISPIF clocks
+ * @nclocks: number of entries in @clock
+ * @clock: clock table
+ * @clock_for_reset: per-clock flag selecting the subset each clock
+ *                   belongs to
+ * @reset: subset to enable (matched against @clock_for_reset)
+ *
+ * On failure every clock already enabled is disabled again.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int ispif_enable_clocks(int nclocks, struct clk **clock,
+			       u8 *clock_for_reset, u8 reset)
+{
+	int idx;
+	int ret;
+
+	for (idx = 0; idx < nclocks; idx++) {
+		if (clock_for_reset[idx] != reset)
+			continue;
+
+		ret = clk_prepare_enable(clock[idx]);
+		if (ret) {
+			pr_err("clock enable failed\n");
+			goto err_unwind;
+		}
+	}
+
+	return 0;
+
+err_unwind:
+	while (--idx >= 0)
+		if (clock_for_reset[idx] == reset)
+			clk_disable_unprepare(clock[idx]);
+
+	return ret;
+}
+
+/*
+ * ispif_disable_clocks - Disable a subset of the ISPIF clocks
+ * @nclocks: number of entries in @clock
+ * @clock: clock table
+ * @clock_for_reset: per-clock subset flag
+ * @reset: subset to disable
+ */
+static void ispif_disable_clocks(int nclocks, struct clk **clock,
+				 u8 *clock_for_reset, u8 reset)
+{
+	int idx = nclocks;
+
+	/* Reverse order of ispif_enable_clocks(). */
+	while (idx-- > 0)
+		if (clock_for_reset[idx] == reset)
+			clk_disable_unprepare(clock[idx]);
+}
+
+/*
+ * ispif_set_power - Power on/off ISPIF module
+ * @sd: ISPIF V4L2 subdevice
+ * @on: Requested power state
+ *
+ * Enables or disables the non-reset subset (flag 0) of the ISPIF clocks.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int ispif_set_power(struct v4l2_subdev *sd, int on)
+{
+	struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+	int ret = 0;
+
+	/* Fix: trace messages were logged with dev_err; use dev_dbg. */
+	dev_dbg(ispif->camss->dev, "%s: Enter, on = %d\n", __func__, on);
+
+	if (on)
+		ret = ispif_enable_clocks(ispif->nclocks, ispif->clock,
+					  ispif->clock_for_reset, 0);
+	else
+		ispif_disable_clocks(ispif->nclocks, ispif->clock,
+				     ispif->clock_for_reset, 0);
+
+	dev_dbg(ispif->camss->dev, "%s: Exit, on = %d\n", __func__, on);
+
+	return ret;
+}
+
+/*
+ * ispif_reset - Trigger a hardware reset of the ISPIF and wait for it
+ * @ispif: ISPIF device
+ *
+ * The reset-done interrupt (see ispif_isr) completes reset_complete.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int ispif_reset(struct ispif_device *ispif)
+{
+	unsigned long time;
+	int ret;
+
+	ret = ispif_enable_clocks(ispif->nclocks, ispif->clock,
+				  ispif->clock_for_reset, 1);
+	if (ret < 0)
+		return ret;
+
+	writel(0xfe0f1fff, ispif->base + ISPIF_RST_CMD_0);
+
+	/* Fix: the previous unbounded wait_for_completion() could hang
+	 * forever if the reset-done interrupt never fires. */
+	time = wait_for_completion_timeout(&ispif->reset_complete,
+					   msecs_to_jiffies(1000));
+	if (!time) {
+		dev_err(ispif->camss->dev, "ISPIF reset timeout\n");
+		ret = -EIO;
+	}
+
+	ispif_disable_clocks(ispif->nclocks, ispif->clock,
+			     ispif->clock_for_reset, 1);
+
+	return ret;
+}
+
+/*
+ * ispif_reset_sw - Program a VFE's ISPIF registers to their inactive state
+ * @ispif: ISPIF device
+ * @vfe: VFE index
+ *
+ * Masks and clears all three VFE IRQ banks, clears the input selection
+ * and every PIX/RDI CID mask, and parks both interface command
+ * registers at "no change". The final global-clear commits the
+ * IRQ-clear writes.
+ */
+static void ispif_reset_sw(struct ispif_device *ispif, u8 vfe)
+{
+
+ writel_relaxed(ISPIF_VFE_m_CTRL_0_PIX0_LINE_BUF_EN,
+ ispif->base + ISPIF_VFE_m_CTRL_0(vfe));
+ writel_relaxed(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe));
+ writel_relaxed(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe));
+ writel_relaxed(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe));
+ writel_relaxed(0xffffffff, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe));
+ writel_relaxed(0xffffffff, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe));
+ writel_relaxed(0xffffffff, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(vfe));
+
+ writel_relaxed(0, ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe));
+
+ writel_relaxed(CMD_ALL_NO_CHANGE,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe));
+ writel_relaxed(CMD_ALL_NO_CHANGE,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe));
+
+ writel_relaxed(0,
+ ispif->base + ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 0));
+ writel_relaxed(0,
+ ispif->base + ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 1));
+ writel_relaxed(0,
+ ispif->base + ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 0));
+ writel_relaxed(0,
+ ispif->base + ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 1));
+ writel_relaxed(0,
+ ispif->base + ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 2));
+
+ /* order the relaxed writes above before the global clear command */
+ wmb();
+ writel_relaxed(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD);
+ wmb();
+}
+
+/*
+ * ispif_select_clk_mux - Route CSID @csid's clock to a VFE interface
+ * @ispif: ISPIF device
+ * @intf: destination interface (PIX0/1, RDI0/1/2)
+ * @csid: CSID index to select
+ * @vfe: VFE index
+ *
+ * PIX muxes live in the first clk mux register (4 bits per PIX
+ * interface, 8 bits per VFE); RDI muxes live at CSI_RDI_CLK_MUX_SEL
+ * (4 bits per RDI, 12 bits per VFE).
+ */
+static void ispif_select_clk_mux(struct ispif_device *ispif,
+ enum ispif_intf intf, u8 csid, u8 vfe)
+{
+ u32 val = 0;
+
+ switch (intf) {
+ case PIX0:
+ val = readl_relaxed(ispif->base_clk_mux);
+ val &= ~(0xf << (vfe * 8));
+ val |= (csid << (vfe * 8));
+ writel_relaxed(val, ispif->base_clk_mux);
+ break;
+
+ case RDI0:
+ val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ val &= ~(0xf << (vfe * 12));
+ val |= (csid << (vfe * 12));
+ writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ break;
+
+ case PIX1:
+ val = readl_relaxed(ispif->base_clk_mux);
+ val &= ~(0xf << (4 + (vfe * 8)));
+ val |= (csid << (4 + (vfe * 8)));
+ writel_relaxed(val, ispif->base_clk_mux);
+ break;
+
+ case RDI1:
+ val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ val &= ~(0xf << (4 + (vfe * 12)));
+ val |= (csid << (4 + (vfe * 12)));
+ writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ break;
+
+ case RDI2:
+ val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ val &= ~(0xf << (8 + (vfe * 12)));
+ val |= (csid << (8 + (vfe * 12)));
+ writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
+ break;
+ }
+
+ /* commit the relaxed mux write before any subsequent register access */
+ mb();
+}
+
+/*
+ * ispif_validate_intf_status - Check that a VFE interface is idle
+ * @ispif: ISPIF device
+ * @intf: interface to check (PIX0/1, RDI0/1/2)
+ * @vfe: VFE index
+ *
+ * All four low status bits must read back as 1 (0xf) for the
+ * interface to be considered idle.
+ *
+ * Return 0 when idle, -EBUSY otherwise
+ */
+static int ispif_validate_intf_status(struct ispif_device *ispif,
+                                      enum ispif_intf intf, u8 vfe)
+{
+        int ret = 0;
+        /* initialized: previously read uninitialized (and reported busy
+         * unpredictably) if a new enum value fell through the switch */
+        u32 val = 0;
+
+        switch (intf) {
+        case PIX0:
+                val = readl_relaxed(ispif->base +
+                        ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 0));
+                break;
+        case RDI0:
+                val = readl_relaxed(ispif->base +
+                        ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0));
+                break;
+        case PIX1:
+                val = readl_relaxed(ispif->base +
+                        ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 1));
+                break;
+        case RDI1:
+                val = readl_relaxed(ispif->base +
+                        ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 1));
+                break;
+        case RDI2:
+                val = readl_relaxed(ispif->base +
+                        ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 2));
+                break;
+        }
+
+        if ((val & 0xf) != 0xf)
+                ret = -EBUSY;
+
+        return ret;
+}
+
+/*
+ * ispif_select_csid - Select CSID @csid as the input of a VFE interface
+ * @ispif: ISPIF device
+ * @intf: destination interface
+ * @csid: CSID index (2-bit field per interface)
+ * @vfe: VFE index
+ *
+ * Read-modify-write of the INTF_INPUT_SEL register: each interface owns
+ * a 2-bit CSID selector at a fixed bit position.
+ */
+static void ispif_select_csid(struct ispif_device *ispif,
+ enum ispif_intf intf, u8 csid, u8 vfe)
+{
+ u32 val;
+
+ val = readl_relaxed(ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe));
+ switch (intf) {
+ case PIX0:
+ val &= ~(BIT(1) | BIT(0));
+ val |= csid;
+ break;
+ case RDI0:
+ val &= ~(BIT(5) | BIT(4));
+ val |= (csid << 4);
+ break;
+ case PIX1:
+ val &= ~(BIT(9) | BIT(8));
+ val |= (csid << 8);
+ break;
+ case RDI1:
+ val &= ~(BIT(13) | BIT(12));
+ val |= (csid << 12);
+ break;
+ case RDI2:
+ val &= ~(BIT(21) | BIT(20));
+ val |= (csid << 20);
+ break;
+ }
+
+ /* NOTE(review): barriers both before and after a single relaxed
+ * write look overly cautious - confirm intended ordering */
+ wmb();
+ writel_relaxed(val, ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe));
+ wmb();
+}
+
+/*
+ * ispif_enable_cid - Enable/disable CIDs on a VFE interface
+ * @ispif: ISPIF device
+ * @intf: interface whose CID mask is updated
+ * @cid_mask: bitmask of CIDs to change
+ * @vfe: VFE index
+ * @enable: non-zero sets the bits in @cid_mask, zero clears them
+ */
+static void ispif_enable_cid(struct ispif_device *ispif, enum ispif_intf intf,
+ u16 cid_mask, u8 vfe, u8 enable)
+{
+ u32 addr, val;
+
+ switch (intf) {
+ case PIX0:
+ addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 0);
+ break;
+ case RDI0:
+ addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 0);
+ break;
+ case PIX1:
+ addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 1);
+ break;
+ case RDI1:
+ addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 1);
+ break;
+ case RDI2:
+ addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 2);
+ break;
+ }
+
+ val = readl_relaxed(ispif->base + addr);
+ if (enable)
+ val |= cid_mask;
+ else
+ val &= ~cid_mask;
+
+ wmb();
+ writel_relaxed(val, ispif->base + addr);
+ wmb();
+}
+
+/*
+ * ispif_config_irq - Enable the ISPIF IRQs of one VFE
+ * @ispif: ISPIF device
+ * @vfe: VFE index
+ *
+ * Unmasks all three IRQ banks, clears any pending bits matching the
+ * new masks, then issues a global clear to commit.
+ */
+static void ispif_config_irq(struct ispif_device *ispif, u8 vfe)
+{
+ u32 val;
+
+ val = ISPIF_VFE_m_IRQ_MASK_0_ENABLE;
+ writel(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe));
+ writel(val, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe));
+ val = ISPIF_VFE_m_IRQ_MASK_1_ENABLE;
+ writel(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe));
+ writel(val, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe));
+ val = ISPIF_VFE_m_IRQ_MASK_2_ENABLE;
+ writel(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe));
+ writel(val, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(vfe));
+ wmb();
+ writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD);
+ wmb();
+}
+
+/*
+ * ispif_intf_cmd - Issue a start/stop command for one interface/VC
+ * @ispif: ISPIF device
+ * @cmd: 2-bit interface command value
+ * @intf: target interface; RDI2 lives in INTF_CMD_1, the rest in CMD_0
+ * @vfe: VFE index
+ * @vc: virtual channel (2 bits per VC within the interface's byte)
+ *
+ * NOTE(review): the command word is rebuilt from CMD_ALL_NO_CHANGE
+ * instead of read back from the register, so only one interface can be
+ * commanded at a time - confirm this is intended for multi-stream use.
+ */
+static void ispif_intf_cmd(struct ispif_device *ispif, u8 cmd,
+ enum ispif_intf intf, u8 vfe, u8 vc)
+{
+ u32 val = CMD_ALL_NO_CHANGE;
+
+ if (intf == RDI2) {
+ val &= ~(0x3 << (vc * 2 + 8));
+ val |= (cmd << (vc * 2 + 8));
+ wmb();
+ writel_relaxed(val, ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe));
+ wmb();
+ } else {
+ val &= ~(0x3 << (vc * 2 + intf * 8));
+ val |= (cmd << (vc * 2 + intf * 8));
+ wmb();
+ writel_relaxed(val, ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe));
+ wmb();
+ }
+}
+
+/*
+ * ispif_set_stream - Enable/disable streaming on ISPIF module
+ * @sd: ISPIF V4L2 subdevice
+ * @enable: Requested streaming state
+ *
+ * Main configuration of ISPIF module is also done here.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
+{
+        struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+        enum ispif_intf ispif_intf = RDI0;
+        u8 vfe = 0;
+        u8 vc = 0; /* TODO: How to get this from sensor? */
+        u8 cid = vc * 4;
+        int ret;
+
+        /* was dev_err: plain trace output belongs at debug severity */
+        dev_dbg(ispif->camss->dev, "%s: Enter, enable = %d\n",
+                __func__, enable);
+
+        if (enable) {
+                u8 csid = ispif->csid_id;
+
+                /* cannot stream without a connected source */
+                if (!media_entity_remote_pad(
+                                &ispif->pads[MSM_ISPIF_PAD_SINK])) {
+                        return -ENOLINK;
+                }
+
+                /* Reset */
+
+                ret = ispif_reset(ispif);
+                if (ret < 0)
+                        return ret;
+
+                /* Config */
+
+                ispif_reset_sw(ispif, vfe);
+
+                ispif_select_clk_mux(ispif, ispif_intf, csid, vfe);
+
+                ret = ispif_validate_intf_status(ispif, ispif_intf, vfe);
+                if (ret < 0)
+                        return ret;
+
+                ispif_select_csid(ispif, ispif_intf, csid, vfe);
+
+                ispif_enable_cid(ispif, ispif_intf, 1 << cid, vfe, 1);
+
+                ispif_config_irq(ispif, vfe);
+
+                ispif_intf_cmd(ispif, CMD_ENABLE_FRAME_BOUNDARY,
+                               ispif_intf, vfe, vc);
+        } else {
+                u32 stop_flag = 0;
+
+                ispif_intf_cmd(ispif, CMD_DISABLE_FRAME_BOUNDARY,
+                               ispif_intf, vfe, vc);
+
+                /* wait for the interface to drain to idle (all 0xf) */
+                ret = readl_poll_timeout(ispif->base +
+                                ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0),
+                                stop_flag,
+                                (stop_flag & 0xf) == 0xf,
+                                ISPIF_TIMEOUT_SLEEP_US,
+                                ISPIF_TIMEOUT_ALL_US);
+                if (ret < 0)
+                        return ret;
+
+                ispif_enable_cid(ispif, ispif_intf, 1 << cid, vfe, 0);
+        }
+
+        return 0;
+}
+
+/*
+ * msm_ispif_subdev_init - Initialize ISPIF device structure and resources
+ * @ispif: ISPIF device
+ * @camss: Camera sub-system structure
+ * @res: ISPIF module resources table
+ *
+ * Maps the two register blocks, requests the IRQ and acquires the
+ * clocks listed in @res. All resources are devm-managed.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_ispif_subdev_init(struct ispif_device *ispif, struct camss *camss,
+                          struct resources_ispif *res)
+{
+        struct device *dev = camss->dev;
+        struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+        struct resource *r;
+        int i;
+        int ret;
+
+        ispif->camss = camss;
+
+        /* Memory */
+
+        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
+        ispif->base = devm_ioremap_resource(dev, r);
+        if (IS_ERR(ispif->base)) {
+                dev_err(dev, "could not map memory\n");
+                return PTR_ERR(ispif->base);
+        }
+
+        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[1]);
+        ispif->base_clk_mux = devm_ioremap_resource(dev, r);
+        if (IS_ERR(ispif->base_clk_mux)) {
+                dev_err(dev, "could not map memory\n");
+                return PTR_ERR(ispif->base_clk_mux);
+        }
+
+        /* Interrupt */
+
+        r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res->interrupt);
+        if (!r) {
+                /* previously r->start was dereferenced before any check
+                 * and IS_ERR_VALUE() was applied to a plain u32, which
+                 * never encodes an error - a missing resource crashed */
+                dev_err(dev, "missing IRQ resource\n");
+                return -ENODEV;
+        }
+        ispif->irq = r->start;
+
+        ret = devm_request_irq(dev, ispif->irq, ispif_isr,
+                               IRQF_TRIGGER_RISING, "ispif", ispif);
+        if (ret < 0) {
+                dev_err(dev, "request_irq failed\n");
+                return ret;
+        }
+
+        /* Clocks */
+
+        ispif->nclocks = 0;
+        while (res->clock[ispif->nclocks])
+                ispif->nclocks++;
+
+        /* devm_kcalloc: zeroed, overflow-checked array allocation */
+        ispif->clock = devm_kcalloc(dev, ispif->nclocks,
+                                    sizeof(*ispif->clock), GFP_KERNEL);
+        if (!ispif->clock)
+                return -ENOMEM;
+
+        ispif->clock_for_reset = devm_kcalloc(dev, ispif->nclocks,
+                                              sizeof(*ispif->clock_for_reset),
+                                              GFP_KERNEL);
+        if (!ispif->clock_for_reset)
+                return -ENOMEM;
+
+        for (i = 0; i < ispif->nclocks; i++) {
+                ispif->clock[i] = devm_clk_get(dev, res->clock[i]);
+                if (IS_ERR(ispif->clock[i]))
+                        return PTR_ERR(ispif->clock[i]);
+                ispif->clock_for_reset[i] = res->clock_for_reset[i];
+        }
+
+        init_completion(&ispif->reset_complete);
+
+        return 0;
+}
+
+/*
+ * ispif_link_setup - Record the connected CSID when the sink link is enabled
+ *
+ * When an enabled link is attached to the ISPIF sink pad, remember the
+ * id of the remote CSID so streaming can route its clock and data.
+ */
+static int ispif_link_setup(struct media_entity *entity,
+                            const struct media_pad *local,
+                            const struct media_pad *remote, u32 flags)
+{
+        struct v4l2_subdev *sd;
+        struct ispif_device *ispif;
+        struct csid_device *csid;
+
+        if (!(local->flags & MEDIA_PAD_FL_SINK) ||
+            !(flags & MEDIA_LNK_FL_ENABLED))
+                return 0;
+
+        sd = container_of(entity, struct v4l2_subdev, entity);
+        ispif = v4l2_get_subdevdata(sd);
+
+        sd = container_of(remote->entity, struct v4l2_subdev, entity);
+        csid = v4l2_get_subdevdata(sd);
+
+        ispif->csid_id = csid->id;
+
+        return 0;
+}
+
+/* Core ops: only power control is implemented */
+static const struct v4l2_subdev_core_ops ispif_core_ops = {
+ .s_power = ispif_set_power,
+};
+
+/* Video ops: stream on/off drives the full ISPIF configuration */
+static const struct v4l2_subdev_video_ops ispif_video_ops = {
+ .s_stream = ispif_set_stream,
+};
+
+/* Pad ops: intentionally empty for now (no format negotiation yet) */
+static const struct v4l2_subdev_pad_ops ispif_pad_ops;
+
+static const struct v4l2_subdev_ops ispif_v4l2_ops = {
+ .core = &ispif_core_ops,
+ .video = &ispif_video_ops,
+ .pad = &ispif_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ispif_v4l2_internal_ops;
+
+/* Media entity ops: capture the connected CSID id on link setup */
+static const struct media_entity_operations ispif_media_ops = {
+ .link_setup = ispif_link_setup,
+};
+
+/*
+ * msm_ispif_register_entities - Register subdev node for ISPIF module
+ * @ispif: ISPIF device
+ * @v4l2_dev: V4L2 device
+ *
+ * Initializes the subdevice, its two pads (sink/source) and registers
+ * it with the V4L2 device.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int msm_ispif_register_entities(struct ispif_device *ispif,
+                                struct v4l2_device *v4l2_dev)
+{
+        struct v4l2_subdev *sd = &ispif->subdev;
+        struct media_pad *pads = ispif->pads;
+        int ret;
+
+        v4l2_subdev_init(sd, &ispif_v4l2_ops);
+        sd->internal_ops = &ispif_v4l2_internal_ops;
+        sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+        /* "%s": never pass a macro as the format string directly */
+        snprintf(sd->name, ARRAY_SIZE(sd->name), "%s", MSM_ISPIF_NAME);
+        v4l2_set_subdevdata(sd, ispif);
+
+        pads[MSM_ISPIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+        pads[MSM_ISPIF_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+
+        sd->entity.ops = &ispif_media_ops;
+        ret = media_entity_init(&sd->entity, MSM_ISPIF_PADS_NUM, pads, 0);
+        if (ret < 0) {
+                pr_err("Fail to init media entity\n");
+                return ret;
+        }
+
+        ret = v4l2_device_register_subdev(v4l2_dev, sd);
+        if (ret < 0) {
+                pr_err("Fail to register subdev\n");
+                media_entity_cleanup(&sd->entity);
+        }
+
+        return ret;
+}
+
+/*
+ * msm_ispif_unregister_entities - Unregister ISPIF module subdev node
+ * @ispif: ISPIF device
+ *
+ * Counterpart of msm_ispif_register_entities(); unregistering the
+ * subdev also removes its media entity from the graph.
+ */
+void msm_ispif_unregister_entities(struct ispif_device *ispif)
+{
+ v4l2_device_unregister_subdev(&ispif->subdev);
+}
--- /dev/null
+/*
+ * ispif.h
+ *
+ * Qualcomm MSM Camera Subsystem - ISPIF Module
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef QC_MSM_CAMSS_ISPIF_H
+#define QC_MSM_CAMSS_ISPIF_H
+
+#include <linux/clk.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#define MSM_ISPIF_PAD_SINK 0
+#define MSM_ISPIF_PAD_SRC 1
+#define MSM_ISPIF_PADS_NUM 2
+
+struct camss;
+
+/* Per-instance state of the ISPIF hardware block */
+struct ispif_device {
+ struct v4l2_subdev subdev; /* exposed V4L2 subdevice */
+ struct media_pad pads[MSM_ISPIF_PADS_NUM]; /* sink + source pads */
+ struct camss *camss; /* parent camera sub-system */
+ void __iomem *base; /* ISPIF register block */
+ void __iomem *base_clk_mux; /* CSI clock mux registers */
+ u32 irq; /* ISPIF interrupt line */
+ struct clk **clock; /* clock handles (nclocks entries) */
+ u8 *clock_for_reset; /* per-clock tag: 1 = reset-only clock */
+ int nclocks; /* number of entries in clock[] */
+ struct completion reset_complete; /* signalled by ISR on reset ack */
+ u8 csid_id; /* CSID connected to the sink pad */
+};
+
+struct resources_ispif;
+
+int msm_ispif_subdev_init(struct ispif_device *ispif, struct camss *camss,
+ struct resources_ispif *res);
+
+int msm_ispif_register_entities(struct ispif_device *ispif,
+ struct v4l2_device *v4l2_dev);
+
+void msm_ispif_unregister_entities(struct ispif_device *ispif);
+
+#endif /* QC_MSM_CAMSS_ISPIF_H */
--- /dev/null
+/*
+ * vfe.c
+ *
+ * Qualcomm MSM Camera Subsystem - VFE Module
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <asm/dma-iommu.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_iommu.h>
+#include <linux/spinlock_types.h>
+#include <linux/spinlock.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "vfe.h"
+#include "camss.h"
+
+#define MSM_VFE_DRV_NAME "msm_vfe"
+#define MSM_VFE_VIDEO_NAME "msm_vfe_video"
+
+#define VFE_0_HW_VERSION 0x000
+
+#define VFE_0_GLOBAL_RESET_CMD 0x00c
+#define VFE_0_GLOBAL_RESET_CMD_CORE (1 << 0)
+#define VFE_0_GLOBAL_RESET_CMD_CAMIF (1 << 1)
+#define VFE_0_GLOBAL_RESET_CMD_BUS (1 << 2)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG (1 << 3)
+#define VFE_0_GLOBAL_RESET_CMD_REGISTER (1 << 4)
+#define VFE_0_GLOBAL_RESET_CMD_TIMER (1 << 5)
+#define VFE_0_GLOBAL_RESET_CMD_PM (1 << 6)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR (1 << 7)
+#define VFE_0_GLOBAL_RESET_CMD_TESTGEN (1 << 8)
+
+#define VFE_0_IRQ_CMD 0x024
+#define VFE_0_IRQ_CMD_GLOBAL_CLEAR (1 << 0)
+
+#define VFE_0_IRQ_MASK_0 0x028
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_0_PING_PONG (1 << 8)
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_1_PING_PONG (1 << 9)
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_2_PING_PONG (1 << 10)
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_3_PING_PONG (1 << 11)
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_4_PING_PONG (1 << 12)
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_5_PING_PONG (1 << 13)
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_6_PING_PONG (1 << 14)
+#define VFE_0_IRQ_MASK_0_RESET_ACK (1 << 31)
+#define VFE_0_IRQ_MASK_1 0x02c
+#define VFE_0_IRQ_MASK_1_VIOLATION (1 << 7)
+#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK (1 << 8)
+#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_0_BUS_OVERFLOW (1 << 9)
+#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_1_BUS_OVERFLOW (1 << 10)
+#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_2_BUS_OVERFLOW (1 << 11)
+
+#define VFE_0_IRQ_CLEAR_0 0x030
+#define VFE_0_IRQ_CLEAR_0_ALL 0xffffffff
+
+#define VFE_0_IRQ_CLEAR_1 0x034
+#define VFE_0_IRQ_CLEAR_1_ALL 0xffffffff
+
+#define VFE_0_IRQ_STATUS_0 0x038
+#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_0_PING_PONG (1 << 8)
+#define VFE_0_IRQ_STATUS_0_RESET_ACK (1 << 31)
+#define VFE_0_IRQ_STATUS_1 0x03c
+#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK (1 << 8)
+#define VFE_0_IRQ_STATUS_1_RDI0_SOF (1 << 29)
+
+#define VFE_0_BUS_CMD 0x4c
+#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) (1 << (x))
+
+#define VFE_0_BUS_CFG 0x050
+
+#define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * (x))
+#define VFE_0_BUS_XBAR_CFG_x_M0_SINGLE_STREAM_SEL_SHIFT 8
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7
+
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1F << 2)
+
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) (0x088 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) (0x08c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
+
+#define VFE_0_BUS_PING_PONG_STATUS 0x268
+#define VFE_0_BUS_OPERATION_STATUS 0x26c
+
+#define VFE_0_BUS_BDG_CMD 0x2c0
+#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
+
+#define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4
+#define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8
+#define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc
+#define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0
+#define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4
+#define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8
+#define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc
+#define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0
+
+#define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x)))
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xF << 28)
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xF << 4)
+#define VFE_0_RDI_CFG_x_RDI_EN_BIT (1 << 2)
+#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
+#define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) (1 << (16 + (r)))
+
+#define VFE_0_REG_UPDATE 0x378
+#define VFE_0_REG_UPDATE_RDI0 (1 << 1)
+#define VFE_0_REG_UPDATE_RDI1 (1 << 2)
+#define VFE_0_REG_UPDATE_RDI2 (1 << 3)
+
+#define VFE_0_CGC_OVERRIDE_1 0x974
+#define VFE_0_CGC_OVERRIDE_1_IMAGE_M0_CGC_OVERRIDE 1
+
+/* Vfe reset timeout */
+#define MSM_VFE_RESET_TIMEOUT_MS 50
+/* Vfe halt timeout */
+#define MSM_VFE_HALT_TIMEOUT_MS 100
+/* Max number of frame drop updates per frame */
+#define MSM_VFE_FRAME_DROP_UPDATES 5
+/* Frame drop value NOTE it VAL + UPDATES should not exceed 31 */
+#define MSM_VFE_FRAME_DROP_VAL 20
+
+
+/* VFE clock names resolved with devm_clk_get() at probe time.
+ * NOTE(review): these tables are only read - they could be declared
+ * static const char *const. */
+static char *clocks[] = {
+ "camss_top_ahb_clk",
+ "vfe_clk_src",
+ "camss_vfe_vfe_clk",
+ "camss_csi_vfe_clk",
+ "iface_clk",
+ "bus_clk",
+ "camss_ahb_clk"
+};
+
+/* Named platform resources for the VFE register blocks and IRQ */
+static char *reg = "vfe0";
+static char *reg_vbif = "vfe0_vbif";
+
+static char *interrupt = "vfe0";
+
+/* Clear @clr_bits in VFE register @reg (read-modify-write) */
+static inline void msm_vfe_reg_clr(struct vfe_device *vfe,
+                                   u32 reg, u32 clr_bits)
+{
+        writel(readl(vfe->base + reg) & ~clr_bits, vfe->base + reg);
+}
+
+/* Set @set_bits in VFE register @reg (read-modify-write) */
+static inline void msm_vfe_reg_set(struct vfe_device *vfe,
+                                   u32 reg, u32 set_bits)
+{
+        writel(readl(vfe->base + reg) | set_bits, vfe->base + reg);
+}
+
+/* Assert a full VFE global reset; completion is signalled later via the
+ * RESET_ACK interrupt handled in the ISR. */
+static void msm_vfe_global_reset(struct vfe_device *vfe)
+{
+        static const u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_CORE |
+                                      VFE_0_GLOBAL_RESET_CMD_CAMIF |
+                                      VFE_0_GLOBAL_RESET_CMD_BUS |
+                                      VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
+                                      VFE_0_GLOBAL_RESET_CMD_REGISTER |
+                                      VFE_0_GLOBAL_RESET_CMD_TIMER |
+                                      VFE_0_GLOBAL_RESET_CMD_PM |
+                                      VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
+                                      VFE_0_GLOBAL_RESET_CMD_TESTGEN;
+
+        writel(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
+}
+
+/* Enable/disable write master @wm (bit 0 of its WR_CFG register) */
+static void msm_vfe_wm_enable(struct vfe_device *vfe, u32 wm, u32 enable)
+{
+        u32 cfg = VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm);
+
+        if (enable)
+                msm_vfe_reg_set(vfe, cfg, 1);
+        else
+                msm_vfe_reg_clr(vfe, cfg, 1);
+}
+
+/* Switch write master @wm between frame-based and line-based mode */
+static void msm_vfe_wm_frame_based(struct vfe_device *vfe, u32 wm, u32 enable)
+{
+        u32 cfg = VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm);
+        u32 bit = 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT;
+
+        if (enable)
+                msm_vfe_reg_set(vfe, cfg, bit);
+        else
+                msm_vfe_reg_clr(vfe, cfg, bit);
+}
+
+/* Program the 5-bit frame drop period of write master @wm */
+static void msm_vfe_wm_set_framedrop_period(struct vfe_device *vfe,
+ u32 wm, u32 per)
+{
+ u32 reg;
+
+ reg = readl(vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+
+ reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
+
+ reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT) &
+ VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
+
+ writel(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+}
+
+/* Program the 32-bit keep/drop frame pattern of write master @wm */
+static void msm_vfe_wm_set_framedrop_pattern(struct vfe_device *vfe,
+ u32 wm, u32 pat)
+{
+ writel(pat, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
+}
+
+/* Program the ub (buffer) offset and depth of write master @wm */
+static void msm_vfe_wm_set_ub_cfg(struct vfe_device *vfe, u32 wm,
+ u16 offset, u16 depth)
+{
+ u32 reg;
+
+ reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) | depth;
+ writel(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
+}
+
+/* Trigger a bus reload so write master @wm latches new ping/pong addrs */
+static void msm_vfe_bus_reload_wm(struct vfe_device *vfe, u32 wm)
+{
+ writel(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
+}
+
+/* Set the "ping" DMA target address of write master @wm */
+static void msm_vfe_wm_set_ping_addr(struct vfe_device *vfe, u32 wm, u32 addr)
+{
+ writel(addr, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
+}
+
+/* Set the "pong" DMA target address of write master @wm */
+static void msm_vfe_wm_set_pong_addr(struct vfe_device *vfe, u32 wm, u32 addr)
+{
+ writel(addr, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
+}
+
+/* Return which half (0 = ping, 1 = pong) write master @wm points at */
+static int msm_vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u32 wm)
+{
+ u32 reg;
+
+ reg = readl(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
+
+ return (reg >> wm) & 0x1;
+}
+
+/* Enable/disable the VFE write bus interface.
+ * NOTE(review): 0x10000009 is an undocumented magic configuration -
+ * confirm the individual bits against the HW programming guide. */
+static void msm_vfe_bus_enable_wr_if(struct vfe_device *vfe, u32 enable)
+{
+ if (enable)
+ writel(0x10000009, vfe->base + VFE_0_BUS_CFG);
+ else
+ writel(0, vfe->base + VFE_0_BUS_CFG);
+}
+
+/*
+ * msm_vfe_bus_connect_wm_to_rdi - Route RDI @rdi to write master @wm
+ * @vfe: VFE device
+ * @wm: write master index
+ * @rdi: RDI interface index (0..2)
+ *
+ * Return 0 on success or -EINVAL for an invalid @rdi
+ */
+static int msm_vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u32 wm, u32 rdi)
+{
+ u32 reg;
+
+ /* NOTE(review): the MIPI/frame-based enables are always written to
+ * RDI_CFG_x(0) regardless of @rdi - consistent with the per-RDI
+ * bit macro (1 << (16 + r)), but confirm against the HW docs. */
+ reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(rdi);
+ msm_vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ reg |= (wm << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
+ VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
+ msm_vfe_reg_set(vfe, VFE_0_RDI_CFG_x(rdi), reg);
+
+ /* select the matching single-stream source in the bus crossbar */
+ switch (rdi) {
+ case 0:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M0_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case 1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M0_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case 2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M0_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ default:
+ dev_err(vfe->camss->dev, "Invalid rdi %d\n", rdi);
+ return -EINVAL;
+ }
+
+ writel(reg, vfe->base + VFE_0_BUS_XBAR_CFG_x(wm));
+
+ writel(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
+
+ return 0;
+}
+
+/* Disable RDI @rdi by clearing its whole CFG register.
+ * NOTE(review): "dicconnect" is a typo for "disconnect"; renaming
+ * requires updating callers outside this hunk. */
+static int msm_vfe_bus_dicconnect_wm_from_rdi(struct vfe_device *vfe, u32 rdi)
+{
+ writel(0x0, vfe->base + VFE_0_RDI_CFG_x(rdi));
+
+ return 0;
+}
+
+/* Select CID @cid for RDI @rdi_idx (4-bit M0_SEL field, RMW) */
+static void msm_vfe_set_rdi_cid(struct vfe_device *vfe, u32 rdi_idx, u8 cid)
+{
+ msm_vfe_reg_clr(vfe,
+ VFE_0_RDI_CFG_x(rdi_idx),
+ VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
+
+ msm_vfe_reg_set(vfe,
+ VFE_0_RDI_CFG_x(rdi_idx),
+ cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
+}
+
+/*
+ * msm_vfe_reg_update - Commit shadow registers for one RDI interface
+ * @vfe: VFE device
+ * @rdi_idx: RDI index (0..2)
+ *
+ * Return 0 on success or -EINVAL for an invalid index
+ */
+static int msm_vfe_reg_update(struct vfe_device *vfe, int rdi_idx)
+{
+        static const u32 update_bit[] = {
+                VFE_0_REG_UPDATE_RDI0,
+                VFE_0_REG_UPDATE_RDI1,
+                VFE_0_REG_UPDATE_RDI2,
+        };
+
+        if (rdi_idx < 0 || rdi_idx >= (int)ARRAY_SIZE(update_bit)) {
+                dev_err(vfe->camss->dev, "Invalid vfe interface %d\n",
+                        rdi_idx);
+                return -EINVAL;
+        }
+
+        writel(update_bit[rdi_idx], vfe->base + VFE_0_REG_UPDATE);
+
+        return 0;
+}
+
+/* Clear the given bits in both IRQ status banks, then issue the global
+ * clear command that commits the clears */
+static void msm_vfe_irq_clear(struct vfe_device *vfe, u32 clr_0, u32 clr_1)
+{
+ writel(clr_0, vfe->base + VFE_0_IRQ_CLEAR_0);
+
+ writel(clr_1, vfe->base + VFE_0_IRQ_CLEAR_1);
+
+ /* ensure both clear writes land before the global clear command */
+ wmb();
+
+ writel(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
+}
+
+/* Unmask bank-0 interrupts: all 7 WM ping-pong events plus reset ack */
+static void msm_vfe_enable_irq_0(struct vfe_device *vfe)
+{
+ u32 irq_en = VFE_0_IRQ_MASK_0_IMAGE_MASTER_0_PING_PONG |
+ VFE_0_IRQ_MASK_0_IMAGE_MASTER_1_PING_PONG |
+ VFE_0_IRQ_MASK_0_IMAGE_MASTER_2_PING_PONG |
+ VFE_0_IRQ_MASK_0_IMAGE_MASTER_3_PING_PONG |
+ VFE_0_IRQ_MASK_0_IMAGE_MASTER_4_PING_PONG |
+ VFE_0_IRQ_MASK_0_IMAGE_MASTER_5_PING_PONG |
+ VFE_0_IRQ_MASK_0_IMAGE_MASTER_6_PING_PONG |
+ VFE_0_IRQ_MASK_0_RESET_ACK;
+
+ writel(irq_en, vfe->base + VFE_0_IRQ_MASK_0);
+}
+
+/* Unmask bank-1 interrupts: violation, halt ack and bus overflows */
+static void msm_vfe_enable_irq_1(struct vfe_device *vfe)
+{
+ u32 irq_en = VFE_0_IRQ_MASK_1_VIOLATION |
+ VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK |
+ VFE_0_IRQ_MASK_1_IMAGE_MASTER_0_BUS_OVERFLOW |
+ VFE_0_IRQ_MASK_1_IMAGE_MASTER_1_BUS_OVERFLOW |
+ VFE_0_IRQ_MASK_1_IMAGE_MASTER_2_BUS_OVERFLOW;
+
+ writel(irq_en, vfe->base + VFE_0_IRQ_MASK_1);
+}
+
+/* Clear any stale status, then unmask both interrupt banks */
+static void msm_vfe_enable_irq_all(struct vfe_device *vfe)
+{
+ msm_vfe_irq_clear(vfe, VFE_0_IRQ_CLEAR_0_ALL, VFE_0_IRQ_CLEAR_1_ALL);
+ msm_vfe_enable_irq_0(vfe);
+ msm_vfe_enable_irq_1(vfe);
+}
+
+/* Mask both interrupt banks, then clear any pending status */
+static void msm_vfe_disable_irq_all(struct vfe_device *vfe)
+{
+
+ writel(0x0, vfe->base + VFE_0_IRQ_MASK_0);
+ writel(0x0, vfe->base + VFE_0_IRQ_MASK_1);
+ msm_vfe_irq_clear(vfe, VFE_0_IRQ_CLEAR_0_ALL, VFE_0_IRQ_CLEAR_1_ALL);
+}
+
+static void msm_vfe_isr_wm_done(struct vfe_device *vfe, u32 wm_idx);
+
+/*
+ * msm_vfe_subdev_isr - VFE interrupt handler
+ * @irq: interrupt line
+ * @dev: vfe_device pointer registered with devm_request_irq
+ *
+ * Reads and acknowledges both status banks, completes pending
+ * reset/halt waiters and dispatches WM0 ping-pong done events.
+ */
+static irqreturn_t msm_vfe_subdev_isr(int irq, void *dev)
+{
+ struct vfe_device *vfe = dev;
+ u32 value0, value1;
+
+ value0 = readl(vfe->base + VFE_0_IRQ_STATUS_0);
+ value1 = readl(vfe->base + VFE_0_IRQ_STATUS_1);
+
+ writel(value0, vfe->base + VFE_0_IRQ_CLEAR_0);
+ writel(value1, vfe->base + VFE_0_IRQ_CLEAR_1);
+
+ /* order the clears before the global clear command */
+ wmb();
+ writel(0x1, vfe->base + VFE_0_IRQ_CMD); /* apply IRQ Clear[01] */
+
+ if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
+ complete_all(&vfe->reset_completion);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK)
+ complete_all(&vfe->halt_completion);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_0_PING_PONG)
+ msm_vfe_isr_wm_done(vfe, 0);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * msm_vfe_reset - Global-reset the VFE and wait for the ack interrupt
+ * @vfe: VFE device
+ *
+ * NOTE(review): init_completion() on every call re-initializes a live
+ * completion; reinit_completion() is the canonical re-arm primitive.
+ *
+ * Return 0 on success or -EIO on timeout
+ */
+static int msm_vfe_reset(struct vfe_device *vfe)
+{
+ unsigned long time;
+
+ init_completion(&vfe->reset_completion);
+
+ msm_vfe_global_reset(vfe);
+
+ time = wait_for_completion_timeout(&vfe->reset_completion,
+ msecs_to_jiffies(MSM_VFE_RESET_TIMEOUT_MS));
+ if (!time) {
+ dev_err(vfe->camss->dev, "Vfe reset timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_vfe_halt - Request a bus bridge halt and wait for its ack
+ * @vfe: VFE device
+ *
+ * Return 0 on success or -EIO on timeout
+ */
+static int msm_vfe_halt(struct vfe_device *vfe)
+{
+ unsigned long time;
+
+ init_completion(&vfe->halt_completion);
+
+ writel(VFE_0_BUS_BDG_CMD_HALT_REQ, vfe->base + VFE_0_BUS_BDG_CMD);
+
+ time = wait_for_completion_timeout(&vfe->halt_completion,
+ msecs_to_jiffies(MSM_VFE_HALT_TIMEOUT_MS));
+ if (!time) {
+ dev_err(vfe->camss->dev, "Vfe halt timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Put every VFE output into the OFF state with no buffers attached */
+static void msm_vfe_init_outputs(struct vfe_device *vfe)
+{
+        struct msm_vfe_output *out;
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(vfe->output); i++) {
+                out = &vfe->output[i];
+                out->active_wm = 0;
+                out->state = MSM_VFE_OUTPUT_OFF;
+                out->buf[0] = NULL;
+                out->buf[1] = NULL;
+                INIT_LIST_HEAD(&out->pending_bufs);
+        }
+}
+
+/* Mark all rdi/wm/composite slots as unused (-1), under the output lock */
+static void msm_vfe_reset_output_maps(struct vfe_device *vfe)
+{
+        unsigned long flags;
+        int i;
+
+        spin_lock_irqsave(&vfe->output_lock, flags);
+
+        for (i = 0; i < ARRAY_SIZE(vfe->rdi_output_map); i++)
+                vfe->rdi_output_map[i] = -1;
+        for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
+                vfe->wm_output_map[i] = -1;
+        for (i = 0; i < ARRAY_SIZE(vfe->composite_output_map); i++)
+                vfe->composite_output_map[i] = -1;
+
+        spin_unlock_irqrestore(&vfe->output_lock, flags);
+}
+
+/* Program the bus bridge QoS configuration.
+ * NOTE(review): 0xaaa5aaa5 / 0x0001aaa5 are undocumented magic values -
+ * confirm against the HW programming guide. */
+static void msm_vfe_set_qos(struct vfe_device *vfe)
+{
+ u32 val = 0xaaa5aaa5;
+ u32 val7 = 0x0001aaa5;
+
+ writel(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
+ writel(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
+ writel(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
+ writel(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
+ writel(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
+ writel(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
+ writel(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
+ writel(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
+}
+
+/* Override clock gating for image master 0 */
+static void msm_vfe_im_cgc_override(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_M0_CGC_OVERRIDE;
+
+ writel(val, vfe->base + VFE_0_CGC_OVERRIDE_1);
+}
+
+/*
+ * msm_vfe_output_init_addrs - Program initial ping/pong addresses
+ * @vfe: VFE device
+ * @output: VFE output whose write masters are programmed
+ * @sync: when non-zero, issue a bus reload so HW latches the addresses
+ *
+ * Ping comes from buf[0], pong from buf[1]; with a single queued
+ * buffer, pong falls back to the ping address.
+ */
+static void msm_vfe_output_init_addrs(struct vfe_device *vfe,
+                                      struct msm_vfe_output *output,
+                                      int sync)
+{
+        u32 ping_addr = 0;
+        u32 pong_addr = 0;
+        int i;
+
+        output->active_buf = 0;
+
+        if (output->buf[0])
+                ping_addr = output->buf[0]->addr;
+
+        if (output->buf[1])
+                pong_addr = output->buf[1]->addr;
+        else
+                pong_addr = ping_addr;
+
+        for (i = 0; i < output->active_wm; i++) {
+                /* was dev_err: per-frame trace output is debug, not error */
+                dev_dbg(vfe->camss->dev,
+                        "init_addrs: wm[%d], ping = 0x%08x, pong = 0x%08x\n",
+                        i, ping_addr, pong_addr);
+                msm_vfe_wm_set_ping_addr(vfe, output->wm[i].wm_idx, ping_addr);
+                msm_vfe_wm_set_pong_addr(vfe, output->wm[i].wm_idx, pong_addr);
+                if (sync)
+                        msm_vfe_bus_reload_wm(vfe, output->wm[i].wm_idx);
+        }
+}
+
+/* Zero both ping and pong addresses of every active WM of @output */
+static void msm_vfe_output_reset_addrs(struct vfe_device *vfe,
+                                       struct msm_vfe_output *output)
+{
+        int i;
+
+        for (i = 0; i < output->active_wm; i++) {
+                u32 wm = output->wm[i].wm_idx;
+
+                msm_vfe_wm_set_ping_addr(vfe, wm, 0x00);
+                msm_vfe_wm_set_pong_addr(vfe, wm, 0x00);
+        }
+}
+
+/* Reprogram the ping address (from buf[0], or 0) of every active WM of
+ * @output; optionally trigger a bus reload so HW picks it up at once. */
+static void msm_vfe_output_update_ping_addr(struct vfe_device *vfe,
+                                            struct msm_vfe_output *output,
+                                            int sync)
+{
+        u32 addr = output->buf[0] ? output->buf[0]->addr : 0;
+        int i;
+
+        for (i = 0; i < output->active_wm; i++) {
+                u32 wm = output->wm[i].wm_idx;
+
+                msm_vfe_wm_set_ping_addr(vfe, wm, addr);
+                if (sync)
+                        msm_vfe_bus_reload_wm(vfe, wm);
+        }
+}
+
+/* Reprogram the pong address (from buf[1], or 0) of every active WM of
+ * @output; optionally trigger a bus reload so HW picks it up at once. */
+static void msm_vfe_output_update_pong_addr(struct vfe_device *vfe,
+                                            struct msm_vfe_output *output,
+                                            int sync)
+{
+        u32 addr = output->buf[1] ? output->buf[1]->addr : 0;
+        int i;
+
+        for (i = 0; i < output->active_wm; i++) {
+                u32 wm = output->wm[i].wm_idx;
+
+                msm_vfe_wm_set_pong_addr(vfe, wm, addr);
+                if (sync)
+                        msm_vfe_bus_reload_wm(vfe, wm);
+        }
+}
+
+/* Claim the first free RDI slot for @output_idx.
+ * Returns the slot index, or -EBUSY when all slots are taken. */
+static int __msm_vfe_reserve_rdi(struct vfe_device *vfe, u32 output_idx)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(vfe->rdi_output_map); i++) {
+                if (vfe->rdi_output_map[i] < 0) {
+                        vfe->rdi_output_map[i] = output_idx;
+                        return i;
+                }
+        }
+
+        return -EBUSY;
+}
+
+/*
+ * __msm_vfe_release_rdi - Return an RDI slot to the free pool
+ * @vfe: VFE device
+ * @rdi_idx: slot index to release
+ *
+ * Return 0 on success or -EINVAL for an out-of-range index
+ */
+static int __msm_vfe_release_rdi(struct vfe_device *vfe, u32 rdi_idx)
+{
+        /* was '>': rdi_idx == ARRAY_SIZE() slipped through the check and
+         * wrote one element past the end of the map */
+        if (rdi_idx >= ARRAY_SIZE(vfe->rdi_output_map))
+                return -EINVAL;
+
+        vfe->rdi_output_map[rdi_idx] = -1;
+
+        return 0;
+}
+
+/* Claim the first free write master slot for @output_idx.
+ * Returns the slot index, or -EBUSY when all slots are taken. */
+static int __msm_vfe_reserve_wm(struct vfe_device *vfe, u32 output_idx)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) {
+                if (vfe->wm_output_map[i] < 0) {
+                        vfe->wm_output_map[i] = output_idx;
+                        return i;
+                }
+        }
+
+        return -EBUSY;
+}
+
+/*
+ * __msm_vfe_release_wm - Return a write master slot to the free pool
+ * @vfe: VFE device
+ * @wm_idx: slot index to release
+ *
+ * Return 0 on success or -EINVAL for an out-of-range index
+ */
+static int __msm_vfe_release_wm(struct vfe_device *vfe, u32 wm_idx)
+{
+        /* was '>': wm_idx == ARRAY_SIZE() slipped through the check and
+         * wrote one element past the end of the map */
+        if (wm_idx >= ARRAY_SIZE(vfe->wm_output_map))
+                return -EINVAL;
+
+        vfe->wm_output_map[wm_idx] = -1;
+
+        return 0;
+}
+
+/* Vfe hw buffer operations */
+
+/* Pop the oldest pending buffer of @output, or NULL if none is queued.
+ * NOTE: should be called with the vfe output lock held. */
+static struct msm_video_buffer *
+__msm_vfe_get_next_output_buf(struct msm_vfe_output *output)
+{
+        struct msm_video_buffer *buffer;
+
+        if (list_empty(&output->pending_bufs))
+                return NULL;
+
+        buffer = list_first_entry(&output->pending_bufs,
+                                  struct msm_video_buffer, dma_queue);
+        list_del(&buffer->dma_queue);
+
+        return buffer;
+}
+
+/*
+ * msm_vfe_output_frame_drop - Set frame drop pattern per given output
+ * @vfe: Pointer to vfe device.
+ * @output: Pointer to vfe output.
+ * @drop_pattern: Kept (1) or dropped (0). The pattern starts from bit 0
+ * and progresses to bit 31.
+ */
static void msm_vfe_output_frame_drop(struct vfe_device *vfe,
				      struct msm_vfe_output *output,
				      u32 drop_pattern)
{
	u32 drop_period;
	int i;

	/* We need to toggle update period to be valid on next frame */
	output->drop_update_idx++;
	output->drop_update_idx %= MSM_VFE_FRAME_DROP_UPDATES;
	drop_period = MSM_VFE_FRAME_DROP_VAL + output->drop_update_idx;

	/* Program period first, then pattern, then trigger the register
	 * update; the barriers keep the hardware writes in that order. */
	for (i = 0; i < output->active_wm; i++) {
		msm_vfe_wm_set_framedrop_period(vfe, output->wm[i].wm_idx,
						drop_period);
		/* period must reach hw before the pattern is written */
		wmb();
		msm_vfe_wm_set_framedrop_pattern(vfe, output->wm[i].wm_idx,
						 drop_pattern);
		/* pattern must reach hw before the reg update commits it */
		wmb();
		msm_vfe_reg_update(vfe, output->wm[i].rdi_idx);
	}
}
+
+/*
+ * __msm_vfe_add_output_buf - Add output buffer to vfe output
+ * @output: Pointer to vfe output.
+ * @buffer: Pointer to video buffer.
+ *
+ * NOTE: Should be called with vfe locked.
+ */
+void __msm_vfe_add_output_buf(struct msm_vfe_output *output,
+ struct msm_video_buffer *buffer)
+{
+ INIT_LIST_HEAD(&buffer->dma_queue);
+ list_add_tail(&buffer->dma_queue, &output->pending_bufs);
+}
+
+/*
+ * __msm_vfe_flush_output_bufs - Flush all pending out buffers.
+ * @output: Pointer to vfe output.
+ *
+ * NOTE: Should be called with vfe locked.
+ */
+void __msm_vfe_flush_output_bufs(struct msm_vfe_output *output)
+{
+ struct msm_video_buffer *buf;
+ struct msm_video_buffer *t;
+
+ list_for_each_entry_safe(buf, t, &output->pending_bufs, dma_queue) {
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ list_del(&buf->dma_queue);
+ }
+}
+
+static void __msm_vfe_update_wm_on_next_buf(struct vfe_device *vfe,
+ struct msm_vfe_output *output)
+{
+ switch (output->state) {
+ case MSM_VFE_OUTPUT_CONTINUOUS:
+ msm_vfe_output_frame_drop(vfe, output, 3);
+ break;
+ case MSM_VFE_OUTPUT_SINGLE:
+ dev_err_ratelimited(vfe->camss->dev,
+ "Next buf in single state!\n");
+ break;
+ default:
+ return;
+ }
+}
+
/*
 * __msm_vfe_update_wm_on_last_buf - Step the output state machine down when
 * no further pending buffer is available.
 * @vfe: Pointer to vfe device.
 * @output: Pointer to vfe output.
 *
 * CONTINUOUS -> SINGLE: one buffer left, capture it then stop.
 * SINGLE -> IDLE: nothing left, disable capture and clear hw addresses.
 *
 * NOTE: Should be called with the output lock held.
 */
static void __msm_vfe_update_wm_on_last_buf(struct vfe_device *vfe,
					    struct msm_vfe_output *output)
{
	switch (output->state) {
	case MSM_VFE_OUTPUT_CONTINUOUS:
		output->state = MSM_VFE_OUTPUT_SINGLE;
		msm_vfe_output_frame_drop(vfe, output, 1);
		break;
	case MSM_VFE_OUTPUT_SINGLE:
		output->state = MSM_VFE_OUTPUT_IDLE;
		msm_vfe_output_frame_drop(vfe, output, 0);
		msm_vfe_output_reset_addrs(vfe, output);
		break;
	default:
		dev_err_ratelimited(vfe->camss->dev,
				    "Last buff in wrong state! %d\n",
				    output->state);
		return;
	}
}
+
/*
 * __msm_vfe_update_wm_on_new_buf - Step the output state machine up when a
 * new buffer is queued by userspace.
 * @vfe: Pointer to vfe device.
 * @output: Pointer to vfe output.
 * @new_buf: The freshly queued video buffer.
 *
 * SINGLE -> CONTINUOUS: fill the inactive ping/pong slot with the new buffer.
 * IDLE -> SINGLE: install the buffer in slot 0 and re-arm the hw addresses.
 * CONTINUOUS (and anything else): just append to the pending list.
 *
 * NOTE: Should be called with the output lock held.
 */
static void __msm_vfe_update_wm_on_new_buf(struct vfe_device *vfe,
					   struct msm_vfe_output *output,
					   struct msm_video_buffer *new_buf)
{
	int inactive_idx;

	switch (output->state) {

	case MSM_VFE_OUTPUT_SINGLE:
		/* the slot hardware is NOT currently writing to */
		inactive_idx = !output->active_buf;

		if (!output->buf[inactive_idx]) {
			output->buf[inactive_idx] = new_buf;

			if (inactive_idx)
				msm_vfe_output_update_pong_addr(vfe,
								output, 0);
			else
				msm_vfe_output_update_ping_addr(vfe,
								output, 0);

			msm_vfe_output_frame_drop(vfe, output, 3);
			output->state = MSM_VFE_OUTPUT_CONTINUOUS;
		} else {
			__msm_vfe_add_output_buf(output, new_buf);
			dev_err_ratelimited(vfe->camss->dev,
					    "Inactive buffer is busy\n");
		}
		break;

	case MSM_VFE_OUTPUT_IDLE:
		if (!output->buf[0]) {
			output->buf[0] = new_buf;

			msm_vfe_output_init_addrs(vfe, output, 1);

			/* After wm reload we can not skip second frame.
			 * Capture only second frame to avoid iommu fault */
			msm_vfe_output_frame_drop(vfe, output, 2);
			output->state = MSM_VFE_OUTPUT_SINGLE;
		} else {
			__msm_vfe_add_output_buf(output, new_buf);
			dev_err_ratelimited(vfe->camss->dev,
					    "Output idle with buffer set!\n");
		}
		break;

	case MSM_VFE_OUTPUT_CONTINUOUS:
		/* fall through - both slots busy, queue for later */

	default:
		__msm_vfe_add_output_buf(output, new_buf);
		return;
	}
}
+
+static struct msm_vfe_output* msm_vfe_get_output(struct vfe_device *vfe,
+ u32 output_idx)
+{
+ struct msm_vfe_output *output;
+ unsigned long flags;
+ int wm_idx;
+ int rdi_idx;
+
+ if (output_idx > ARRAY_SIZE(vfe->output))
+ return ERR_PTR(-EINVAL);
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ output = &vfe->output[output_idx];
+ if (output->state != MSM_VFE_OUTPUT_OFF) {
+ dev_err(vfe->camss->dev, "Output is running\n");
+ goto error;
+ }
+ output->state = MSM_VFE_OUTPUT_RESERVED;
+
+ output->active_buf = 0;
+
+ rdi_idx = __msm_vfe_reserve_rdi(vfe, output_idx);
+ if (rdi_idx < 0) {
+ dev_err(vfe->camss->dev, "Can not reserve rdi\n");
+ goto error_get_rdi;
+ }
+
+ /* We will use only one wm per output for now */
+ wm_idx = __msm_vfe_reserve_wm(vfe, output_idx);
+ if (wm_idx < 0) {
+ dev_err(vfe->camss->dev, "Can not reserve wm\n");
+ goto error_get_wm;
+ }
+ output->active_wm = 1;
+ output->drop_update_idx = 0;
+ output->wm[0].wm_idx = wm_idx;
+ output->wm[0].rdi_idx = rdi_idx;
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return output;
+
+error_get_wm:
+ __msm_vfe_release_rdi(vfe, rdi_idx);
+error_get_rdi:
+ output->state = MSM_VFE_OUTPUT_OFF;
+error:
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int msm_vfe_put_output(struct vfe_device *vfe,
+ struct msm_vfe_output *output)
+{
+ struct msm_vfe_wm *wm;
+ unsigned long flags;
+ int ret;
+ int i;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ for (i = 0; i < output->active_wm; i++) {
+ wm = &output->wm[i];
+
+ ret = __msm_vfe_release_wm(vfe, wm->wm_idx);
+ if (ret < 0)
+ goto out;
+
+ ret = __msm_vfe_release_rdi(vfe, wm->rdi_idx);
+ if (ret < 0)
+ goto out;
+ }
+
+ output->state = MSM_VFE_OUTPUT_OFF;
+ output->active_wm = 0;
+
+out:
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+ return ret;
+}
+
/*
 * msm_vfe_enable_output - Program and enable a reserved output
 * @vfe: Pointer to vfe device.
 * @output: Pointer to vfe output (must be in RESERVED state).
 * @ub_size: Size of the usage buffer slice assigned to each write master.
 *
 * Primes up to two buffers from the pending list, picks the initial frame
 * drop pattern accordingly and programs each write master.
 *
 * Return: 0 on success, -EINVAL if the output was not reserved.
 */
static int msm_vfe_enable_output(struct vfe_device *vfe,
				 struct msm_vfe_output *output,
				 u32 ub_size)
{
	struct msm_vfe_wm *wm;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&vfe->output_lock, flags);

	if (output->state != MSM_VFE_OUTPUT_RESERVED) {
		dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
			output->state);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
		return -EINVAL;
	}
	output->state = MSM_VFE_OUTPUT_IDLE;

	/* Prime ping (buf[0]) and pong (buf[1]) from the pending list;
	 * the resulting state reflects how many buffers we got. */
	output->buf[0] = __msm_vfe_get_next_output_buf(output);
	if (output->buf[0])
		output->state = MSM_VFE_OUTPUT_SINGLE;

	output->buf[1] = __msm_vfe_get_next_output_buf(output);
	if (output->buf[1])
		output->state = MSM_VFE_OUTPUT_CONTINUOUS;

	msm_vfe_set_qos(vfe);

	switch (output->state) {
	case MSM_VFE_OUTPUT_SINGLE:
		/* After wm reload we can not skip second frame.
		 * Capture only second frame to avoid iommu fault */
		/* Skip 4 bad frames from sensor TODO: get number from sensor */
		msm_vfe_output_frame_drop(vfe, output, 2 << 4);
		break;
	case MSM_VFE_OUTPUT_CONTINUOUS:
		/* Skip 4 bad frames from sensor TODO: get number from sensor */
		msm_vfe_output_frame_drop(vfe, output, 3 << 4);
		break;
	default:
		msm_vfe_output_frame_drop(vfe, output, 0);
		break;
	}

	msm_vfe_output_init_addrs(vfe, output, 0);

	/* Per write master: wire to rdi, set CID, carve its ub slice,
	 * enable frame based mode and trigger reload + reg update. */
	for (i = 0; i < output->active_wm; i++) {
		wm = &output->wm[i];

		msm_vfe_bus_connect_wm_to_rdi(vfe, wm->wm_idx, wm->rdi_idx);

		msm_vfe_set_rdi_cid(vfe, wm->rdi_idx, wm->rdi_idx);

		msm_vfe_wm_set_ub_cfg(vfe, wm->wm_idx,
				      (ub_size * wm->wm_idx), ub_size);

		msm_vfe_wm_frame_based(vfe, wm->wm_idx, 1);
		msm_vfe_wm_enable(vfe, wm->wm_idx, 1);

		msm_vfe_bus_reload_wm(vfe, output->wm[i].wm_idx);

		msm_vfe_reg_update(vfe, wm->rdi_idx);
	}

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;
}
+
+static int msm_vfe_disable_output(struct vfe_device *vfe,
+ struct msm_vfe_output *output)
+{
+ struct msm_vfe_wm *wm;
+ int i;
+
+ for (i = 0; i < output->active_wm; i++) {
+ wm = &output->wm[i];
+ msm_vfe_wm_enable(vfe, wm->wm_idx, 0);
+ msm_vfe_bus_dicconnect_wm_from_rdi(vfe, wm->rdi_idx);
+ msm_vfe_reg_update(vfe, wm->rdi_idx);
+ }
+
+ return 0;
+}
+
+static int msm_vfe_enable_all_outputs(struct vfe_device *vfe)
+{
+ struct msm_vfe_output *output;
+ u32 ub_size;
+ int ret;
+ int i;
+
+ mutex_lock(&vfe->mutex);
+
+ if (!vfe->stream_cnt)
+ return -EINVAL;
+
+ switch (vfe->hw_id) {
+ case 0:
+ ub_size = MSM_VFE_UB_MAX_SIZE_VFE0;
+ break;
+ case 1:
+ ub_size = MSM_VFE_UB_MAX_SIZE_VFE1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ub_size /= vfe->stream_cnt;
+
+ msm_vfe_im_cgc_override(vfe);
+ wmb();
+
+ /* Bus interface should be enabled first */
+ msm_vfe_bus_enable_wr_if(vfe, 1);
+
+ for (i = 0; i < vfe->stream_cnt; i++) {
+ output = msm_vfe_get_output(vfe, i);
+ if (IS_ERR_OR_NULL(output))
+ goto error;
+
+ ret = msm_vfe_enable_output(vfe, output, ub_size);
+ if (ret < 0)
+ goto error;
+ }
+ vfe->active_outputs = i;
+
+ mutex_unlock(&vfe->mutex);
+
+ return 0;
+
+error:
+ msm_vfe_bus_enable_wr_if(vfe, 0);
+
+ for (; i > 0; i--)
+ msm_vfe_put_output(vfe, &vfe->output[i - 1]);
+
+ mutex_unlock(&vfe->mutex);
+
+ return ret;
+}
+
+static int msm_vfe_disable_all_outputs(struct vfe_device *vfe)
+{
+ int i;
+
+ mutex_lock(&vfe->mutex);
+
+ msm_vfe_bus_enable_wr_if(vfe, 0);
+
+ for (i = 0; i < vfe->active_outputs; i++) {
+ msm_vfe_disable_output(vfe, &vfe->output[i]);
+ msm_vfe_put_output(vfe, &vfe->output[i]);
+ }
+
+ msm_vfe_halt(vfe);
+
+ vfe->active_outputs = 0;
+
+ mutex_unlock(&vfe->mutex);
+
+ return 0;
+}
+
/*
 * msm_vfe_isr_wm_done - Handle a write master "frame done" interrupt
 * @vfe: Pointer to vfe device.
 * @wm_idx: Index of the write master that completed.
 *
 * Reads the hw ping/pong status, returns the just-finished buffer to vb2
 * and re-arms the now-inactive slot with the next pending buffer (or steps
 * the output state machine down if there is none).
 */
static void msm_vfe_isr_wm_done(struct vfe_device *vfe, u32 wm_idx)
{
	struct msm_video_buffer *ready_buf;
	struct msm_vfe_output *output;
	dma_addr_t new_addr;
	unsigned long flags;
	u32 active_index;

	active_index = msm_vfe_wm_get_ping_pong_status(vfe, wm_idx);

	spin_lock_irqsave(&vfe->output_lock, flags);

	if (vfe->wm_output_map[wm_idx] < 0) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Received wm done for unmapped index\n");
		goto out_unlock;
	}
	output = &vfe->output[vfe->wm_output_map[wm_idx]];

	/* The hw must have flipped to the other slot since last time */
	if (output->active_buf == active_index) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Active buffer mismatch!\n");
		goto out_unlock;
	}
	output->active_buf = active_index;

	/* The buffer the hw just finished writing is the inactive one */
	ready_buf = output->buf[!active_index];
	if (!ready_buf) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Missing ready buf %d %d!\n",
				    !active_index, output->state);
		goto out_unlock;
	}

	/* Get next buffer */
	output->buf[!active_index] = __msm_vfe_get_next_output_buf(output);
	if (!output->buf[!active_index]) {
		new_addr = 0;
		__msm_vfe_update_wm_on_last_buf(vfe, output);
	} else {
		new_addr = output->buf[!active_index]->addr;
		__msm_vfe_update_wm_on_next_buf(vfe, output);
	}

	if (active_index)
		msm_vfe_wm_set_ping_addr(vfe, wm_idx, new_addr);
	else
		msm_vfe_wm_set_pong_addr(vfe, wm_idx, new_addr);

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	/* NOTE(review): ready_buf is always non-NULL here (NULL case jumps
	 * to out_unlock above), so the else branch looks unreachable. */
	if (ready_buf)
		vb2_buffer_done(&ready_buf->vb, VB2_BUF_STATE_DONE);
	else
		dev_err_ratelimited(vfe->camss->dev,
				    "Received wm without buffer\n");

	return;

out_unlock:
	spin_unlock_irqrestore(&vfe->output_lock, flags);
}
+
+static int msm_vfe_bus_request(struct vfe_device *vfe)
+{
+ int ret;
+
+ vfe->bus_client = msm_bus_scale_register_client(vfe->bus_scale_table);
+ if (!vfe->bus_client) {
+ dev_err(vfe->camss->dev, "Failed to register bus client\n");
+ return -ENOENT;
+ }
+
+ ret = msm_bus_scale_client_update_request(vfe->bus_client, 1);
+ if (ret < 0) {
+ dev_err(vfe->camss->dev, "Failed bus scale update %d\n", ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void msm_vfe_bus_release(struct vfe_device *vfe)
+{
+ if (vfe->bus_client) {
+ msm_bus_scale_unregister_client(vfe->bus_client);
+ vfe->bus_client = 0;
+ }
+}
+
+static int msm_vfe_set_clock_rate(struct vfe_device *vfe)
+{
+ int ret;
+ long clk_rate;
+
+ // TODO
+ clk_rate = clk_round_rate(vfe->clocks[1].clk, 465000000);
+ if (clk_rate < 0) {
+ dev_err(vfe->camss->dev, "clk round failed\n");
+ return -EINVAL;
+ }
+ ret = clk_set_rate(vfe->clocks[1].clk, clk_rate);
+ if (ret < 0) {
+ dev_err(vfe->camss->dev, "clk set rate failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int msm_vfe_enable_clocks(struct vfe_device *vfe)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < vfe->nclocks; i++) {
+ ret = clk_prepare_enable(vfe->clocks[i].clk);
+ if (ret < 0) {
+ dev_err(vfe->camss->dev,
+ "clock prepare_enable failed %d\n", i);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ for (; i > 0; i--) {
+ clk_disable_unprepare(vfe->clocks[i - 1].clk);
+ }
+ return ret;
+}
+
+static void msm_vfe_disable_clocks(struct vfe_device *vfe)
+{
+ int i;
+
+ for (i = vfe->nclocks - 1; i >= 0; i--)
+ clk_disable_unprepare(vfe->clocks[i].clk);
+}
+
+static int msm_vfe_get(struct vfe_device *vfe)
+{
+ int ret;
+
+ mutex_lock(&vfe->mutex);
+
+ if (vfe->ref_count == 0) {
+ msm_vfe_reset_output_maps(vfe);
+
+ ret = msm_vfe_bus_request(vfe);
+ if (ret < 0) {
+ dev_err(vfe->camss->dev, "Fail bus request\n");
+ goto error_clocks;
+ }
+
+ ret = msm_vfe_set_clock_rate(vfe);
+ if (ret < 0) {
+ dev_err(vfe->camss->dev, "Fail to set clocks rate\n");
+ goto error_clocks;
+ }
+
+ ret = msm_vfe_enable_clocks(vfe);
+ if (ret < 0) {
+ dev_err(vfe->camss->dev, "Fail to enable clocks\n");
+ goto error_clocks;
+ }
+ }
+ vfe->ref_count++;
+
+ mutex_unlock(&vfe->mutex);
+
+ return 0;
+
+error_clocks:
+ mutex_unlock(&vfe->mutex);
+ return ret;
+}
+
+static void msm_vfe_put(struct vfe_device *vfe)
+{
+ mutex_lock(&vfe->mutex);
+ BUG_ON(vfe->ref_count == 0);
+
+ if (--vfe->ref_count == 0) {
+ msm_vfe_disable_irq_all(vfe);
+ msm_vfe_init_outputs(vfe);
+ msm_vfe_bus_release(vfe);
+ msm_vfe_disable_clocks(vfe);
+ }
+ mutex_unlock(&vfe->mutex);
+}
+
+static int msm_vfe_queue_dmabuf(struct camss_video *vid,
+ struct msm_video_buffer *buf)
+{
+ struct vfe_device *vfe = &vid->camss->vfe;
+ struct msm_vfe_output *output;
+ unsigned long flags;
+ int idx;
+
+ idx = 0; // TODO: msm_vfe_pad_to_output(vfe, vid->pad_idx);
+ if (idx < 0) {
+ dev_err(vfe->camss->dev,
+ "Can not queue dma buf invalid pad idx\n");
+ return idx;
+ }
+ output = &vfe->output[idx];
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ __msm_vfe_update_wm_on_new_buf(vfe, output, buf);
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+static int msm_vfe_flush_dmabufs(struct camss_video *vid)
+{
+ struct vfe_device *vfe = &vid->camss->vfe;
+ struct msm_vfe_output *output;
+ unsigned long flags;
+ int idx;
+
+ idx = 0; // TODO: msm_vfe_pad_to_output(vfe, vid->pad_idx);
+ if (idx < 0) {
+ dev_err(vfe->camss->dev,
+ "Can not flush dma buf invalid pad idx\n");
+ return idx;
+ }
+ output = &vfe->output[idx];
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ __msm_vfe_flush_output_bufs(output);
+
+ if (output->buf[0])
+ vb2_buffer_done(&output->buf[0]->vb, VB2_BUF_STATE_ERROR);
+
+ if (output->buf[1])
+ vb2_buffer_done(&output->buf[1]->vb, VB2_BUF_STATE_ERROR);
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+static int msm_vfe_subdev_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct vfe_device *vfe = v4l2_get_subdevdata(sd);
+ int ret;
+
+ dev_err(vfe->camss->dev, "%s: Enter, on = %d\n",
+ __func__, on);
+
+ if (on) {
+ u32 hw_version;
+
+ ret = msm_vfe_get(vfe);
+ if (ret < 0)
+ return ret;
+
+
+ hw_version = readl(vfe->base);
+ dev_err(vfe->camss->dev,
+ "VFE HW Version = 0x%08x\n", hw_version);
+ } else {
+ msm_vfe_put(vfe);
+ }
+
+ dev_err(vfe->camss->dev, "%s: Exit, on = %d\n",
+ __func__, on);
+
+ return 0;
+}
+
+static int msm_vfe_subdev_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vfe_device *vfe = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ dev_err(vfe->camss->dev, "%s: Enter, enable = %d\n",
+ __func__, enable);
+
+ if (enable) {
+ mutex_lock(&vfe->mutex);
+
+ ret = msm_vfe_reset(vfe);
+ if (ret < 0) {
+ dev_err(vfe->camss->dev, "Fail to reset vfe\n");
+ return ret;
+ }
+
+ msm_vfe_enable_irq_all(vfe);
+
+ mutex_unlock(&vfe->mutex);
+
+ ret = msm_vfe_enable_all_outputs(vfe);
+ if (ret < 0)
+ dev_err(vfe->camss->dev,
+ "Fail to enable vfe outputs\n");
+ } else {
+ ret = msm_vfe_disable_all_outputs(vfe);
+ if (ret < 0)
+ dev_err(vfe->camss->dev,
+ "Fail to disable vfe outputs\n");
+ }
+
+ return 0;
+}
+
+int msm_vfe_subdev_init(struct vfe_device *vfe, struct camss *camss,
+ struct vfe_init *init)
+{
+ struct device *dev = camss->dev;
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device,
+ dev);
+ struct resource *r;
+ struct dma_iommu_mapping *mapping;
+ int i;
+ int ret;
+
+ mutex_init(&vfe->mutex);
+ spin_lock_init(&vfe->output_lock);
+
+ vfe->hw_id = 0; // TODO
+
+ vfe->camss = camss;
+ vfe->init = *init;
+
+ vfe->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vfe->video_out.camss = camss;
+
+ // Temp:
+#define FMT_WIDTH 1920
+#define FMT_HEIGHT 1080
+ vfe->stream_cnt = 1;
+ vfe->video_out.active_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vfe->video_out.active_fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
+ vfe->video_out.active_fmt.fmt.pix.width = FMT_WIDTH;
+ vfe->video_out.active_fmt.fmt.pix.height = FMT_HEIGHT;
+ vfe->video_out.active_fmt.fmt.pix.bytesperline = FMT_WIDTH * 2;
+ vfe->video_out.active_fmt.fmt.pix.sizeimage = FMT_WIDTH * FMT_HEIGHT * 2;
+ vfe->video_out.active_fmt.fmt.pix.field = V4L2_FIELD_NONE;
+ vfe->video_out.active_fmt.fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
+
+ /* Memory */
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg);
+ vfe->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(vfe->base)) {
+ dev_err(dev, "could not map memory\n");
+ return PTR_ERR(vfe->base);
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg_vbif);
+ vfe->base_vbif = devm_ioremap_resource(dev, r);
+ if (IS_ERR(vfe->base_vbif)) {
+ dev_err(dev, "could not map memory\n");
+ return PTR_ERR(vfe->base_vbif);
+ }
+
+ /* Clocks */
+
+ vfe->nclocks = ARRAY_SIZE(clocks);
+ vfe->clocks = devm_kzalloc(dev, vfe->nclocks * sizeof(*vfe->clocks),
+ GFP_KERNEL);
+ if (!vfe->clocks) {
+ dev_err(dev, "could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < vfe->nclocks; i++) {
+ vfe->clocks[i].name = clocks[i];
+ }
+
+ for (i = 0; i < vfe->nclocks; i++) {
+ vfe->clocks[i].clk = devm_clk_get(dev, vfe->clocks[i].name);
+ if (IS_ERR(vfe->clocks[i].clk))
+ return PTR_ERR(vfe->clocks[i].clk);
+ }
+
+ /* IOMMU */
+
+ vfe->camss->iommu_dev = msm_iommu_get_ctx("vfe");
+ if (IS_ERR(vfe->camss->iommu_dev)) {
+ dev_err(dev, "Cannot find iommu nonsecure ctx\n");
+ return PTR_ERR(vfe->camss->iommu_dev);
+ }
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ 0x40000000, 0xC0000000, 0);
+ if (IS_ERR_OR_NULL(mapping))
+ return PTR_ERR(mapping) ?: -ENODEV;
+
+ ret = arm_iommu_attach_device(vfe->camss->iommu_dev, mapping);
+ if (ret)
+ return -1;
+
+ /* Interrupt */
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, interrupt);
+ vfe->irq = r->start;
+ if (IS_ERR_VALUE(vfe->irq))
+ return vfe->irq;
+
+ ret = devm_request_irq(dev, vfe->irq, msm_vfe_subdev_isr,
+ IRQF_TRIGGER_RISING, "vfe", vfe);
+ if (ret < 0) {
+ dev_err(dev, "request_irq failed\n");
+ return ret;
+ }
+
+ /* MSM Bus */
+
+ vfe->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (!vfe->bus_scale_table) {
+ dev_err(dev, "bus scaling is disabled\n");
+ return -1;
+ }
+
+ msm_vfe_init_outputs(vfe);
+
+ return 0;
+}
+
/* V4L2 subdev core operations: only power control is implemented */
static const struct v4l2_subdev_core_ops msm_vfe_core_ops = {
	.s_power = msm_vfe_subdev_set_power,
};

/* V4L2 subdev video operations: only stream on/off is implemented */
static const struct v4l2_subdev_video_ops msm_vfe_video_ops = {
	.s_stream = msm_vfe_subdev_set_stream,
};

/* Pad operations intentionally empty for now (no format negotiation) */
static const struct v4l2_subdev_pad_ops msm_vfe_pad_ops;

static const struct v4l2_subdev_ops msm_vfe_ops = {
	.core = &msm_vfe_core_ops,
	.video = &msm_vfe_video_ops,
	.pad = &msm_vfe_pad_ops,
};

static const struct v4l2_subdev_internal_ops msm_vfe_internal_ops;

/* Buffer callbacks wired into the video node for the RDI output path */
static struct msm_video_ops rdi_video_ops = {
	.queue_dmabuf = msm_vfe_queue_dmabuf,
	.flush_dmabufs = msm_vfe_flush_dmabufs,
};
+
+
+int msm_vfe_register_entities(struct vfe_device *vfe,
+ struct v4l2_device *v4l2_dev)
+{
+ struct v4l2_subdev *sd = &vfe->subdev;
+ struct media_pad *pads = vfe->pads;
+ int ret;
+
+ v4l2_subdev_init(sd, &msm_vfe_ops);
+ sd->internal_ops = &msm_vfe_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, ARRAY_SIZE(sd->name), MSM_VFE_DRV_NAME);
+ v4l2_set_subdevdata(sd, vfe);
+
+ pads[MSM_VFE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[MSM_VFE_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_init(&sd->entity, MSM_VFE_PADS_NUM, pads, 0);
+ if (ret < 0) {
+ pr_err("Fail to init media entity");
+ goto error_init_entity;
+ }
+
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret < 0) {
+ pr_err("Fail to register subdev");
+ goto error_reg_subdev;
+ }
+
+ vfe->video_out.ops = &rdi_video_ops;
+ ret = msm_video_register(&vfe->video_out, v4l2_dev, MSM_VFE_VIDEO_NAME);
+ if (ret < 0)
+ goto error_reg_video;
+
+ ret = media_entity_create_link(
+ &vfe->subdev.entity, MSM_VFE_PAD_SRC,
+ &vfe->video_out.video.entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret < 0) {
+ pr_err("Fail to link %s->%s entities\n",
+ vfe->subdev.entity.name,
+ vfe->video_out.video.entity.name);
+ goto error_link;
+ }
+
+ return 0;
+
+error_link:
+ msm_video_unregister(&vfe->video_out);
+error_reg_video:
+ v4l2_device_unregister_subdev(sd);
+error_reg_subdev:
+ media_entity_cleanup(&sd->entity);
+error_init_entity:
+
+ return ret;
+}
+
+void msm_vfe_unregister_entities(struct vfe_device *vfe)
+{
+ v4l2_device_unregister_subdev(&vfe->subdev);
+ msm_video_unregister(&vfe->video_out);
+}
--- /dev/null
+/*
+ * vfe.h
+ *
+ * Qualcomm MSM Camera Subsystem - VFE Module
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef QC_MSM_CAMSS_VFE_H
+#define QC_MSM_CAMSS_VFE_H
+
+#include <linux/clk.h>
+#include <linux/spinlock_types.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "video.h"
+
/* Maximum number of CIDs per rdi interface */
#define MSM_VFE_MAX_CID_NUM 4

/* Media entity pad indices of the vfe subdev */
#define MSM_VFE_PAD_SINK 0
#define MSM_VFE_PAD_SRC 1
#define MSM_VFE_PADS_NUM 2

/* Output limitations */
#define MSM_VFE_MAX_WM_PER_OUTPUT 4
#define MSM_VFE_MAX_OUTPUTS 1

/* Hw definitions */
#define MSM_VFE_NUM_RDI 3
#define MSM_VFE_IMAGE_MASTERS_NUM 7
#define MSM_VFE_IMAGE_COMPOSITE_NUM 4

/* Usage buffer sizes per vfe instance (units per hw spec - TODO confirm) */
#define MSM_VFE_UB_MAX_SIZE_VFE0 827
#define MSM_VFE_UB_MAX_SIZE_VFE1 (1535)

/*
 * Per-output state machine:
 * OFF -> RESERVED (rdi/wm reserved) -> IDLE/SINGLE/CONTINUOUS depending on
 * how many buffers are available for ping/pong capture.
 */
enum msm_vfe_output_state {
	MSM_VFE_OUTPUT_OFF,
	MSM_VFE_OUTPUT_RESERVED,
	MSM_VFE_OUTPUT_SINGLE,
	MSM_VFE_OUTPUT_CONTINUOUS,
	MSM_VFE_OUTPUT_IDLE,
};
+
/* One write master: its hw index, the rdi it is wired to and line stride */
struct msm_vfe_wm {
	u8 rdi_idx;
	u8 wm_idx;
	u32 bytesperline;
};

/* One capture output: up to MSM_VFE_MAX_WM_PER_OUTPUT write masters,
 * the two ping/pong buffers and a FIFO of pending buffers */
struct msm_vfe_output {
	u16 active_wm;		/* number of wm[] entries in use */
	struct msm_vfe_wm wm[MSM_VFE_MAX_WM_PER_OUTPUT];

	int active_buf;		/* ping/pong slot hw currently writes to */
	struct msm_video_buffer *buf[2];	/* ping (0) / pong (1) */
	struct list_head pending_bufs;	/* queued, not yet programmed */

	int drop_update_idx;	/* toggles frame drop period updates */

	enum msm_vfe_output_state state;
};

/* Initial CID configuration handed to msm_vfe_subdev_init() */
struct vfe_init {
	int num_cids;
	unsigned int cid[MSM_VFE_MAX_CID_NUM];
};

/* Named clock handle */
struct clock_info {
	const char *name;
	struct clk *clk;
};

struct vfe_device {
	int hw_id;		/* 0 or 1, selects the ub size */
	struct vfe_init init;
	struct v4l2_subdev subdev;
	struct media_pad pads[MSM_VFE_PADS_NUM];
	struct camss *camss;
	struct camss_video video_out;
	void __iomem *base;	/* vfe register block */
	void __iomem *base_vbif;	/* vbif register block */
	u32 irq;
	struct clock_info *clocks;
	int nclocks;
	struct completion reset_completion;
	struct completion halt_completion;
	struct mutex mutex;	/* protects ref_count and power sequences */
	int ref_count;
	spinlock_t output_lock;	/* protects output state and the maps below */
	int rdi_output_map[MSM_VFE_NUM_RDI];	/* rdi -> output idx, -1 free */
	int wm_output_map[MSM_VFE_IMAGE_MASTERS_NUM];	/* wm -> output idx */
	int composite_output_map[MSM_VFE_IMAGE_COMPOSITE_NUM];
	int stream_cnt;
	int active_outputs;
	struct msm_vfe_output output[MSM_VFE_MAX_OUTPUTS];
	struct msm_bus_scale_pdata *bus_scale_table;
	uint32_t bus_client;	/* 0 when no bus client registered */
};
+
+int msm_vfe_subdev_init(struct vfe_device *vfe, struct camss *camss,
+ struct vfe_init *init);
+
+int msm_vfe_register_entities(struct vfe_device *vfe,
+ struct v4l2_device *v4l2_dev);
+
+void msm_vfe_unregister_entities(struct vfe_device *vfe);
+
+#endif /* QC_MSM_CAMSS_VFE_H */
--- /dev/null
+/*
+ * video.c
+ *
+ * Qualcomm MSM Camera Subsystem - V4L2 device node
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "video.h"
+#include "camss.h"
+
+static int video_queue_setup(struct vb2_queue *q, const void *parg,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct camss_video *video = vb2_get_drv_priv(q);
+ const struct v4l2_format *fmt = parg;
+
+ *num_planes = 1;
+
+ if (NULL == fmt)
+ sizes[0] = video->active_fmt.fmt.pix.sizeimage;
+ else
+ sizes[0] = fmt->fmt.pix.sizeimage;
+
+ alloc_ctxs[0] = video->alloc_ctx;
+
+ return 0;
+}
+
/* vb2 buf_init operation - no per-buffer initialization is needed */
static int video_buf_init(struct vb2_buffer *vb)
{
	return 0;
}
+
+static int video_buf_prepare(struct vb2_buffer *vb)
+{
+ struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct msm_video_buffer *buffer = container_of(vb,
+ struct msm_video_buffer, vb);
+
+ buffer->addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ vb2_set_plane_payload(vb, 0, video->active_fmt.fmt.pix.sizeimage);
+
+ return 0;
+}
+
/* vb2 buf_finish operation - nothing to do on buffer completion */
static void video_buf_finish(struct vb2_buffer *vb)
{
}
+
+static void video_buf_queue(struct vb2_buffer *vb)
+{
+ struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct msm_video_buffer *buffer = container_of(vb,
+ struct msm_video_buffer, vb);
+
+ msm_video_call(video, queue_dmabuf, buffer);
+}
+
/* vb2 start_streaming operation - streaming is started via the subdev
 * s_stream chain in video_streamon(), nothing to do here */
static int video_start_streaming(struct vb2_queue *q, unsigned int count)
{

	return 0;
}
+
+static void video_stop_streaming(struct vb2_queue *q)
+{
+ struct camss_video *video = vb2_get_drv_priv(q);
+
+ msm_video_call(video, flush_dmabufs);
+}
+
/* vb2 queue operations for the camss capture video node */
static struct vb2_ops msm_video_vb2_q_ops = {
	.queue_setup = video_queue_setup,
	.buf_init = video_buf_init,
	.buf_prepare = video_buf_prepare,
	.buf_finish = video_buf_finish,
	.buf_queue = video_buf_queue,
	.start_streaming = video_start_streaming,
	.stop_streaming = video_stop_streaming,
};
+
+static int video_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct camss_video *video = video_drvdata(file);
+
+ strlcpy(cap->driver, video->video.name, sizeof(cap->driver));
+ strlcpy(cap->card, video->video.name, sizeof(cap->card));
+ strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
+ cap->version = CAMSS_VERSION;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_DEVICE_CAPS;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct camss_video *video = video_drvdata(file);
+
+ if (f->type != video->type)
+ return -EINVAL;
+
+ if (f->index)
+ return -EINVAL;
+
+ f->pixelformat = video->active_fmt.fmt.pix.pixelformat;
+
+ return 0;
+}
+
+static int video_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *f)
+{
+ struct camss_video *video = video_drvdata(file);
+
+ if (f->pixel_format != video->active_fmt.fmt.pix.pixelformat)
+ return -EINVAL;
+
+ if (f->index)
+ return -EINVAL;
+
+ f->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ f->discrete.width = video->active_fmt.fmt.pix.width;
+ f->discrete.height = video->active_fmt.fmt.pix.height;
+
+ return 0;
+}
+
+static int video_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *f)
+{
+ struct camss_video *video = video_drvdata(file);
+
+ if (f->pixel_format != video->active_fmt.fmt.pix.pixelformat ||
+ f->width != video->active_fmt.fmt.pix.width ||
+ f->height != video->active_fmt.fmt.pix.height)
+ return -EINVAL;
+
+ if (f->index)
+ return -EINVAL;
+
+ f->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ f->discrete.numerator = 1;
+ f->discrete.denominator = 30;
+
+ return 0;
+}
+
+static int video_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct camss_video *video = video_drvdata(file);
+
+ if (f->type != video->type)
+ return -EINVAL;
+
+ *f = video->active_fmt;
+
+ return 0;
+}
+
+static int video_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct camss_video *video = video_drvdata(file);
+
+ if (f->type != video->type)
+ return -EINVAL;
+
+ *f = video->active_fmt;
+
+ return 0;
+}
+
+static int video_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct camss_video *video = video_drvdata(file);
+
+ if (f->type != video->type)
+ return -EINVAL;
+
+ *f = video->active_fmt;
+
+ return 0;
+}
+
+static int video_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *b)
+{
+ struct camss_video *video = video_drvdata(file);
+ int ret;
+
+ ret = vb2_reqbufs(&video->vb2_q, b);
+
+ return ret;
+}
+
+static int video_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *b)
+{
+ struct camss_video *video = video_drvdata(file);
+ int ret;
+
+ ret = vb2_querybuf(&video->vb2_q, b);
+
+ return ret;
+}
+
+static int video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct camss_video *video = video_drvdata(file);
+ int ret;
+
+ ret = vb2_qbuf(&video->vb2_q, b);
+
+ return ret;
+}
+
+static int video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct camss_video *video = video_drvdata(file);
+ int ret;
+
+ ret = vb2_dqbuf(&video->vb2_q, b, file->f_flags & O_NONBLOCK);
+
+ return ret;
+}
+
+static int video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct video_device *video_dev = video_devdata(file);
+ struct camss_video *video = video_drvdata(file);
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ if (type != video->type)
+ return -EINVAL;
+
+ ret = media_entity_pipeline_start(&video->video.entity, &video->pipe);
+ if (ret < 0)
+ return ret;
+
+ ret = vb2_streamon(&video->vb2_q, type);
+ if (ret < 0)
+ goto pipeline_stop;
+
+ entity = &video_dev->entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto streamoff;
+ }
+
+ return 0;
+
+pipeline_stop:
+ media_entity_pipeline_stop(&video->video.entity);
+streamoff:
+ vb2_streamoff(&video->vb2_q, type);
+
+ return ret;
+}
+
+/*
+ * video_streamoff - VIDIOC_STREAMOFF handler
+ *
+ * Walk the media graph upstream from the video node, stopping each
+ * connected subdevice, then stop the vb2 queue and release the pipeline.
+ */
+static int video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+	struct video_device *video_dev = video_devdata(file);
+	struct camss_video *video = video_drvdata(file);
+	struct media_entity *entity = &video_dev->entity;
+	int ret;
+
+	if (type != video->type)
+		return -EINVAL;
+
+	for (;;) {
+		struct media_pad *remote;
+		struct v4l2_subdev *sd;
+
+		/* Follow sink-pad links until the chain ends. */
+		if (!(entity->pads[0].flags & MEDIA_PAD_FL_SINK))
+			break;
+
+		remote = media_entity_remote_pad(&entity->pads[0]);
+		if (remote == NULL ||
+		    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+			break;
+
+		entity = remote->entity;
+		sd = media_entity_to_v4l2_subdev(entity);
+
+		/* Best-effort stop; errors from subdevs are ignored here. */
+		v4l2_subdev_call(sd, video, s_stream, 0);
+	}
+
+	ret = vb2_streamoff(&video->vb2_q, type);
+	if (ret)
+		return ret;
+
+	media_entity_pipeline_stop(&video->video.entity);
+
+	return 0;
+}
+
+/* V4L2 ioctl operations for the camss capture video device node. */
+static const struct v4l2_ioctl_ops msm_vid_ioctl_ops = {
+	.vidioc_querycap = video_querycap,
+	.vidioc_enum_fmt_vid_cap = video_enum_fmt,
+	.vidioc_enum_framesizes = video_enum_framesizes,
+	.vidioc_enum_frameintervals = video_enum_frameintervals,
+	.vidioc_g_fmt_vid_cap = video_g_fmt,
+	.vidioc_s_fmt_vid_cap = video_s_fmt,
+	.vidioc_try_fmt_vid_cap = video_try_fmt,
+	.vidioc_reqbufs = video_reqbufs,
+	.vidioc_querybuf = video_querybuf,
+	.vidioc_qbuf = video_qbuf,
+	.vidioc_dqbuf = video_dqbuf,
+	.vidioc_streamon = video_streamon,
+	.vidioc_streamoff = video_streamoff,
+};
+
+/*
+ * video_open - Open handler for the video device node
+ *
+ * Sets up the DMA-contig allocation context, the file handle and the
+ * vb2 queue, and powers up the media pipeline.
+ *
+ * Fix: the original single error label never undid the successful
+ * msm_camss_pipeline_pm_use(entity, 1) when vb2_queue_init() failed,
+ * leaking a pipeline power-use count; it also left the fh initialized
+ * (no v4l2_fh_exit). Unwind now mirrors the setup order exactly.
+ */
+static int video_open(struct file *file)
+{
+	struct video_device *video_dev = video_devdata(file);
+	struct camss_video *video = video_drvdata(file);
+	struct vb2_queue *q;
+	int ret;
+
+	video->alloc_ctx = vb2_dma_contig_init_ctx(video->camss->iommu_dev);
+	if (IS_ERR(video->alloc_ctx)) {
+		dev_err(&video->video.dev, "Failed to init vb2 dma ctx\n");
+		return PTR_ERR(video->alloc_ctx);
+	}
+
+	v4l2_fh_init(&video->fh, video_dev);
+	v4l2_fh_add(&video->fh);
+	file->private_data = &video->fh;
+
+	ret = msm_camss_pipeline_pm_use(&video_dev->entity, 1);
+	if (ret < 0) {
+		dev_err(&video_dev->dev, "pipeline power-up failed\n");
+		goto error_pm_use;
+	}
+
+	q = &video->vb2_q;
+	q->drv_priv = video;
+	q->mem_ops = &vb2_dma_contig_memops;
+	q->ops = &msm_video_vb2_q_ops;
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* TODO: MPLANE */
+	q->io_modes = VB2_MMAP;
+	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+	q->buf_struct_size = sizeof(struct msm_video_buffer);
+	ret = vb2_queue_init(q);
+	if (ret < 0) {
+		dev_err(&video_dev->dev, "vb2 queue init failed\n");
+		goto error_vb2_init;
+	}
+
+	return 0;
+
+error_vb2_init:
+	/* Balance the successful pipeline power-up above. */
+	msm_camss_pipeline_pm_use(&video_dev->entity, 0);
+error_pm_use:
+	file->private_data = NULL;
+	v4l2_fh_del(&video->fh);
+	v4l2_fh_exit(&video->fh);
+	vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
+
+	return ret;
+}
+
+/*
+ * video_release - Release handler for the video device node
+ *
+ * Tears down everything video_open() set up, in reverse order.
+ *
+ * Fix: add the missing v4l2_fh_exit() — v4l2_fh_init() in video_open()
+ * must be balanced by v4l2_fh_exit(), not just v4l2_fh_del().
+ */
+static int video_release(struct file *file)
+{
+	struct video_device *video_dev = video_devdata(file);
+	struct camss_video *video = video_drvdata(file);
+
+	vb2_queue_release(&video->vb2_q);
+
+	msm_camss_pipeline_pm_use(&video_dev->entity, 0);
+
+	file->private_data = NULL;
+	v4l2_fh_del(&video->fh);
+	v4l2_fh_exit(&video->fh);
+
+	vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
+
+	return 0;
+}
+
+/* poll() handler: delegated entirely to videobuf2. */
+static unsigned int video_poll(struct file *file,
+			       struct poll_table_struct *wait)
+{
+	struct camss_video *vid = video_drvdata(file);
+
+	return vb2_poll(&vid->vb2_q, file, wait);
+}
+
+/* mmap() handler: map vb2-managed buffers into userspace. */
+static int video_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct camss_video *vid = video_drvdata(file);
+
+	return vb2_mmap(&vid->vb2_q, vma);
+}
+
+/* File operations for the camss capture video device node. */
+static const struct v4l2_file_operations msm_vid_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = video_ioctl2,
+	.open = video_open,
+	.release = video_release,
+	.poll = video_poll,
+	.mmap = video_mmap,
+};
+
+/*
+ * msm_video_register - Register a video device node
+ * @video: camss video device
+ * @v4l2_dev: parent V4L2 device
+ * @name: device node name
+ *
+ * Returns 0 on success, negative error code otherwise.
+ *
+ * Fixes over the original:
+ * - The video_device is embedded in struct camss_video, so its release
+ *   callback must be video_device_release_empty(); the original used
+ *   video_device_release(), which kfree()s memory that was never
+ *   allocated separately.
+ * - drvdata is now set before registration: once registered, open() can
+ *   run immediately and would have dereferenced NULL drvdata.
+ * - media_entity_cleanup() is called when registration fails, balancing
+ *   media_entity_init().
+ */
+int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
+		       const char *name)
+{
+	struct media_pad *pad = &video->pad;
+	struct video_device *video_dev = &video->video;
+	int ret;
+
+	pad->flags = MEDIA_PAD_FL_SINK;
+	ret = media_entity_init(&video_dev->entity, 1, pad, 0);
+	if (ret < 0) {
+		v4l2_err(v4l2_dev, "Failed to init video entity\n");
+		return ret;
+	}
+
+	video_dev->fops = &msm_vid_fops;
+	video_dev->ioctl_ops = &msm_vid_ioctl_ops;
+	video_dev->release = video_device_release_empty;
+	video_dev->v4l2_dev = v4l2_dev;
+	video_dev->vfl_dir = VFL_DIR_RX;
+	strlcpy(video_dev->name, name, sizeof(video_dev->name));
+
+	video_set_drvdata(video_dev, video);
+
+	ret = video_register_device(video_dev, VFL_TYPE_GRABBER, -1);
+	if (ret < 0) {
+		v4l2_err(v4l2_dev, "Failed to register video device\n");
+		media_entity_cleanup(&video_dev->entity);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * msm_video_unregister - Unregister a video device node
+ * @video: camss video device
+ *
+ * Fix: also clean up the media entity, balancing the media_entity_init()
+ * done in msm_video_register().
+ */
+void msm_video_unregister(struct camss_video *video)
+{
+	video_unregister_device(&video->video);
+	media_entity_cleanup(&video->video.entity);
+}
--- /dev/null
+/*
+ * video.h
+ *
+ * Qualcomm MSM Camera Subsystem - V4L2 device node
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef QC_MSM_CAMSS_VIDEO_H
+#define QC_MSM_CAMSS_VIDEO_H
+
+#include <linux/videodev2.h>
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-core.h>
+
+/*
+ * Driver-private capture buffer. vb2 allocates buffers of this size
+ * (buf_struct_size in video_open), so the embedded vb2_buffer must stay
+ * the first member.
+ */
+struct msm_video_buffer {
+	struct vb2_buffer vb;		/* base vb2 buffer; must be first */
+	unsigned long size;		/* buffer payload size in bytes */
+	dma_addr_t addr;		/* DMA address of the buffer memory */
+	struct list_head dma_queue;	/* entry on the driver's buffer queue
+					 * (presumably consumed by the VFE —
+					 * queue user not visible here) */
+};
+
+/*
+ * State for one V4L2 capture video device node of the camera subsystem.
+ */
+struct camss_video {
+	struct v4l2_fh fh;		/* file handle (single-open model:
+					 * one fh embedded in the device —
+					 * TODO confirm multi-open intent) */
+	struct camss *camss;		/* back-pointer to the camss core */
+	void *alloc_ctx;		/* vb2 dma-contig allocation context */
+	struct vb2_queue vb2_q;		/* capture buffer queue */
+	struct video_device video;	/* embedded V4L2 video device */
+	struct media_pad pad;		/* single sink pad of this entity */
+	struct v4l2_format active_fmt;	/* currently configured format */
+	enum v4l2_buf_type type;	/* buffer type served by this node */
+	struct media_pipeline pipe;	/* pipeline started on STREAMON */
+	struct msm_video_ops *ops;	/* buffer hand-off ops, see below */
+};
+
+/*
+ * Callbacks through which the video node hands buffers to the capture
+ * hardware driver (invoked via the msm_video_call() macro).
+ */
+struct msm_video_ops {
+	/* Queue one buffer to the hardware for filling. */
+	int (*queue_dmabuf)(struct camss_video *vid, struct msm_video_buffer *buf);
+	/* Return all pending buffers (e.g. on stream stop). */
+	int (*flush_dmabufs)(struct camss_video *vid);
+};
+
+/*
+ * Invoke optional op @op on camss_video @f.
+ * Returns -ENODEV if @f is NULL, -ENOIOCTLCMD if the op is not set,
+ * otherwise the op's return value.
+ * NOTE: @f is evaluated several times — don't pass expressions with
+ * side effects.
+ */
+#define msm_video_call(f, op, args...) \
+	(!(f) ? -ENODEV : (((f)->ops && (f)->ops->op) ? \
+	(f)->ops->op((f), ##args) : -ENOIOCTLCMD))
+
+/* Register the video device node @video named @name under @v4l2_dev. */
+int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
+		       const char *name);
+
+/* Unregister a video device node previously registered above. */
+void msm_video_unregister(struct camss_video *video);
+
+#endif /* QC_MSM_CAMSS_VIDEO_H */