git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'xshm/xshm-for-next'
author    Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 13 Dec 2011 06:22:07 +0000 (17:22 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 13 Dec 2011 06:22:07 +0000 (17:22 +1100)
17 files changed:
drivers/Kconfig
drivers/Makefile
drivers/net/caif/Kconfig
drivers/net/caif/Makefile
drivers/net/caif/caif_xshm.c [new file with mode: 0644]
drivers/xshm/Kconfig [new file with mode: 0644]
drivers/xshm/Makefile [new file with mode: 0644]
drivers/xshm/genio_dummy.c [new file with mode: 0644]
drivers/xshm/xshm_boot.c [new file with mode: 0644]
drivers/xshm/xshm_chr.c [new file with mode: 0644]
drivers/xshm/xshm_dev.c [new file with mode: 0644]
include/linux/Kbuild
include/linux/c2c_genio.h [new file with mode: 0644]
include/linux/xshm/Kbuild [new file with mode: 0644]
include/linux/xshm/xshm_ipctoc.h [new file with mode: 0644]
include/linux/xshm/xshm_netlink.h [new file with mode: 0644]
include/linux/xshm/xshm_pdev.h [new file with mode: 0644]

index 041426cfae14dca61bbb9ee35ef1e0b1013515c2..742f0288b98f7666884e20c9ad83c9d97a8ed1bb 100644
@@ -138,4 +138,6 @@ source "drivers/virt/Kconfig"
 
 source "drivers/devfreq/Kconfig"
 
+source "drivers/xshm/Kconfig"
+
 endmenu
index 91077ac6b1564a21449a155cde1b84d6678d6e13..30eae54edfa14f97eecc057d1eb2939177ef483c 100644
@@ -121,6 +121,7 @@ obj-$(CONFIG_VLYNQ)         += vlynq/
 obj-$(CONFIG_STAGING)          += staging/
 obj-y                          += platform/
 obj-y                          += ieee802154/
+obj-$(CONFIG_XSHM)             += xshm/
 #common clk code
 obj-y                          += clk/
 
index abf4d7a9dcce5183fbc89b7a7e3475b86d219618..bc037001ed6fd25f66ead6a10af350da18e911ae 100644
@@ -47,3 +47,13 @@ config CAIF_HSI
        The caif low level driver for CAIF over HSI.
        Be aware that if you enable this then you also need to
        enable a low-level HSI driver.
+
+config CAIF_XSHM
+       tristate "CAIF external memory protocol driver"
+       depends on XSHM && CAIF
+       default n
+       ---help---
+       Say "Y" if you want to support CAIF over the External Shared Memory
+       (XSHM) IPC mechanism (e.g. over Chip to Chip). Only say M here if you
+       want to test CAIF over XSHM and need to load and unload its module.
+       If unsure say N.
index 91dff861560f71c8d58010066b614268ce60a935..9310b24d5a06ffae1757d85946c76017683aa0f9 100644
@@ -10,6 +10,7 @@ obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
 # Shared memory
 caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
 obj-$(CONFIG_CAIF_SHM) += caif_shm.o
+obj-$(CONFIG_CAIF_XSHM) += caif_xshm.o
 
 # HSI interface
 obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
diff --git a/drivers/net/caif/caif_xshm.c b/drivers/net/caif/caif_xshm.c
new file mode 100644
index 0000000..35cff94
--- /dev/null
@@ -0,0 +1,935 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Authors: Sjur Brendeland / sjur.brandeland@stericsson.com
+ *        Daniel Martensson / daniel.martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s() :" fmt, __func__
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <net/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_layer.h>
+#include <linux/xshm/xshm_pdev.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Martensson <daniel.martensson@stericsson.com>");
+MODULE_AUTHOR("Sjur Brendeland <sjur.brandeland@stericsson.com>");
+MODULE_DESCRIPTION("CAIF SHM driver");
+
+#define CONNECT_TIMEOUT (3 * HZ)
+#define CAIF_NEEDED_HEADROOM   32
+#define CAIF_FLOW_ON           1
+#define CAIF_FLOW_OFF          0
+
+#define LOW_XOFF_WATERMARK     50
+#define HIGH_XOFF_WATERMARK    70
+#define STUFF_MARK             30
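+
+/*
+ * Flow-control thresholds in percent of the configured buffer count:
+ * flow is turned off when TX ring usage rises above the high
+ * watermark, turned back on when it drops to the low watermark, and
+ * partially filled TX buffers are flushed once usage falls below
+ * STUFF_MARK.
+ */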
+
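+/*
+ * Ring-buffer bookkeeping. The read/write indices live in shared
+ * memory as little-endian values; one slot is always left unused,
+ * so wip == rip means "empty" and (wip + 1) % size == rip means
+ * "full".
+ */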
+struct ringbuf {
+       __le32  *rip;
+       __le32  *wip;
+       u32     size;
+       __le32  *bufsize;
+};
+
+struct shm_pck_desc {
+       /* Offset from start of channel to CAIF frame. */
+       u32 offset;
+       u32 length;
+} __packed;
+
+struct shm_caif_frm {
+       /* Number of bytes of padding before the CAIF frame. */
+       u8 hdr_ofs;
+} __packed;
+
+#define SHM_HDR_LEN sizeof(struct shm_caif_frm)
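+
+/*
+ * Layout of a shared-memory buffer: it starts with an array of
+ * shm_pck_desc entries, terminated by a descriptor with offset
+ * zero. Each descriptor points to a frame inside the buffer,
+ * consisting of a shm_caif_frm header, hdr_ofs bytes of alignment
+ * padding, and the CAIF frame itself.
+ */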
+
+struct shmbuffer {
+/* Static part: */
+       u8 *addr;
+       u32 index;
+       u32 len;
+/* Dynamic part: */
+       u32 frames;
+       /* Offset from start of buffer to CAIF frame. */
+       u32 frm_ofs;
+};
+
+enum CFSHM_STATE {
+       CFSHM_CLOSED = 1,
+       CFSHM_OPENING,
+       CFSHM_OPEN
+};
+
+struct cfshm {
+       /* caif_dev_common must always be first in the structure */
+       struct caif_dev_common cfdev;
+       struct xshm_dev *xshm;
+       struct napi_struct napi;
+       struct ringbuf tx;
+       struct sk_buff_head sk_qhead;
+       spinlock_t lock;
+       struct ringbuf rx;
+       u8 *rx_ringbuf;
+       u32 rx_frms_pr_buf;
+       u32 rx_alignment;
+       struct shmbuffer **rx_bufs;
+       struct net_device *ndev;
+
+       u32 tx_frms_pr_buf;
+       u32 tx_alignment;
+       struct shmbuffer **tx_bufs;
+       u8 *tx_ringbuf;
+       u32 tx_flow_on;
+       u32 high_xoff_water;
+       u32 low_xoff_water;
+       u32 stuff_mark;
+       atomic_t dbg_smp_rxactive;
+       enum CFSHM_STATE state;
+       struct platform_device *pdev;
+       struct list_head node;
+       wait_queue_head_t netmgmt_wq;
+};
+
+static LIST_HEAD(cfshm_list);
+static spinlock_t cfshm_list_lock;
+
+static unsigned int ringbuf_used(struct ringbuf *rb)
+{
+       if (le32_to_cpu(*rb->wip) >= le32_to_cpu(*rb->rip))
+               return le32_to_cpu(*rb->wip) - le32_to_cpu(*rb->rip);
+       else
+               return rb->size - le32_to_cpu(*rb->rip) + le32_to_cpu(*rb->wip);
+}
+
+static int ringbuf_get_writepos(struct ringbuf *rb)
+{
+       if ((le32_to_cpu(*rb->wip) + 1) % rb->size == le32_to_cpu(*rb->rip))
+               return -1;
+       else
+               return le32_to_cpu(*rb->wip);
+}
+
+static int ringbuf_get_readpos(struct ringbuf *rb)
+{
+       if (le32_to_cpu(*rb->wip) == le32_to_cpu(*rb->rip))
+               return -1;
+       else
+               return le32_to_cpu(*rb->rip);
+}
+
+static int ringbuf_upd_writeptr(struct ringbuf *rb)
+{
+       if (!WARN_ON((le32_to_cpu(*rb->wip) + 1) % rb->size == le32_to_cpu(*rb->rip))) {
+               *rb->wip = cpu_to_le32((le32_to_cpu(*rb->wip) + 1) % rb->size);
+               /* Do write barrier before updating index */
+               smp_wmb();
+       }
+       return le32_to_cpu(*rb->wip);
+}
+
+static void ringbuf_upd_readptr(struct ringbuf *rb)
+{
+       if (!WARN_ON(le32_to_cpu(*rb->wip) == le32_to_cpu(*rb->rip))) {
+               *rb->rip = cpu_to_le32((le32_to_cpu(*rb->rip) + 1) % rb->size);
+               /* Do write barrier before updating index */
+               smp_wmb();
+       }
+}
+
+static struct shmbuffer *get_rx_buf(struct cfshm *cfshm)
+{
+       struct shmbuffer *pbuf = NULL;
+       int idx = ringbuf_get_readpos(&cfshm->rx);
+
+       if (idx < 0)
+               goto out;
+       pbuf = cfshm->rx_bufs[idx];
+out:
+       return pbuf;
+}
+
+static struct shmbuffer *new_rx_buf(struct cfshm *cfshm)
+{
+       struct shmbuffer *pbuf = get_rx_buf(cfshm);
+
+       WARN_ON(!spin_is_locked(&cfshm->lock));
+       if (pbuf)
+               pbuf->frames = 0;
+
+       return pbuf;
+}
+
+static struct shmbuffer *get_tx_buf(struct cfshm *cfshm)
+{
+       int idx = ringbuf_get_writepos(&cfshm->tx);
+
+       if (idx < 0)
+               return NULL;
+       return cfshm->tx_bufs[idx];
+}
+
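+/*
+ * Publish the current TX buffer towards the modem: store its fill
+ * level in the shared buf_size array, advance the ring-buffer write
+ * index, and reset the retired buffer's descriptor bookkeeping so it
+ * can be reused when the ring wraps around.
+ */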
+static inline struct shmbuffer *tx_bump_buf(struct cfshm *cfshm,
+                       struct shmbuffer *pbuf)
+{
+       u32 desc_size;
+       struct shmbuffer *newpbuf = pbuf;
+
+       WARN_ON(!spin_is_locked(&cfshm->lock));
+       if (pbuf) {
+               cfshm->xshm->cfg.tx.buf_size[pbuf->index] =
+                       cpu_to_le32(pbuf->frm_ofs);
+               ringbuf_upd_writeptr(&cfshm->tx);
+               newpbuf = get_tx_buf(cfshm);
+               /* Reset buffer parameters. */
+               desc_size = (cfshm->tx_frms_pr_buf + 1) *
+                       sizeof(struct shm_pck_desc);
+               pbuf->frm_ofs = desc_size + (desc_size % cfshm->tx_alignment);
+               pbuf->frames = 0;
+
+       }
+       return newpbuf;
+}
+
+static struct shmbuffer *shm_rx_func(struct cfshm *cfshm, int quota)
+{
+       struct shmbuffer *pbuf;
+       struct sk_buff *skb;
+       int ret;
+       unsigned long flags;
+
+       pbuf = get_rx_buf(cfshm);
+       while (pbuf) {
+               /* Retrieve pointer to start of the packet descriptor area. */
+               struct shm_pck_desc *pck_desc =
+                       ((struct shm_pck_desc *) pbuf->addr) + pbuf->frames;
+               u32 offset;
+
+               /* Loop until descriptor contains zero offset */
+               while ((offset = pck_desc->offset)) {
+                       unsigned int caif_len;
+                       struct shm_caif_frm *frm;
+                       u32 length = pck_desc->length;
+                       u8 hdr_ofs;
+                       frm = (struct shm_caif_frm *)(pbuf->addr + offset);
+                       hdr_ofs = frm->hdr_ofs;
+                       caif_len =
+                               length - SHM_HDR_LEN -
+                               hdr_ofs;
+
+                       pr_devel("copy data buf:%d frm:%d offs:%d @%x len:%d\n",
+                                       pbuf->index, pbuf->frames, offset,
+                                       (u32) (SHM_HDR_LEN + hdr_ofs + offset +
+                                               pbuf->addr - cfshm->rx_ringbuf),
+                                       length);
+
+                       /* Check whether number of frames is below limit */
+                       if (pbuf->frames > cfshm->rx_frms_pr_buf) {
+                               pr_warn("Too many frames in buffer.\n");
+                               ++cfshm->ndev->stats.rx_frame_errors;
+                               goto desc_err;
+                       }
+
+                       /* Check whether offset is below low limits */
+                       if (pbuf->addr + offset
+                                       <= (u8 *)(pck_desc + 1)) {
+                               pr_warn("Offset in desc. below buffer area.\n");
+                               ++cfshm->ndev->stats.rx_frame_errors;
+                               goto desc_err;
+                       }
+
+                       /* Check whether offset above upper limit */
+                       if (offset + length > pbuf->len) {
+                               pr_warn("Offset outside buffer area:\n");
+                               ++cfshm->ndev->stats.rx_frame_errors;
+                               goto desc_err;
+                       }
+
+                       skb = netdev_alloc_skb(cfshm->ndev,
+                                                       caif_len + 1);
+                       if (skb == NULL) {
+                               pr_debug("Couldn't allocate SKB\n");
+                               ++cfshm->ndev->stats.rx_dropped;
+                               goto out;
+                       }
+
+                       memcpy(skb_put(skb, caif_len),
+                                       SHM_HDR_LEN + hdr_ofs +
+                                       offset + pbuf->addr,
+                                       caif_len);
+
+                       skb->protocol = htons(ETH_P_CAIF);
+                       skb_reset_mac_header(skb);
+                       skb->dev = cfshm->ndev;
+
+                       /* Push received packet up the stack. */
+                       ret = netif_receive_skb(skb);
+
+                       if (!ret) {
+                               cfshm->ndev->stats.rx_packets++;
+                               cfshm->ndev->stats.rx_bytes +=
+                                       length;
+                       } else
+                               ++cfshm->ndev->stats.rx_dropped;
+                       /* Move to next packet descriptor. */
+                       pck_desc++;
+
+                       pbuf->frames++;
+                       if (--quota <= 0) {
+                               pr_devel("Quota exceeded (pbuf:%p)\n", pbuf);
+                               goto out;
+                       }
+               }
+desc_err:
+               pbuf->frames = 0;
+
+               spin_lock_irqsave(&cfshm->lock, flags);
+               ringbuf_upd_readptr(&cfshm->rx);
+               pbuf = new_rx_buf(cfshm);
+               spin_unlock_irqrestore(&cfshm->lock, flags);
+
+       }
+       cfshm->xshm->ipc_rx_release(cfshm->xshm, false);
+out:
+       return pbuf;
+}
+
+static int insert_skb_in_buf(struct cfshm *cfshm, struct sk_buff *skb,
+                                       struct shmbuffer *pbuf)
+{
+       struct shm_pck_desc *pck_desc;
+       unsigned int frmlen;
+       struct shm_caif_frm *frm;
+       u8 hdr_ofs;
+       struct caif_payload_info *info = (struct caif_payload_info *)&skb->cb;
+
+       WARN_ON(!spin_is_locked(&cfshm->lock));
+
+       if (unlikely(pbuf->frames >= cfshm->tx_frms_pr_buf)) {
+               pr_devel("-ENOSPC exceeded frames: %d >= %d\n",
+                               pbuf->frames, cfshm->tx_frms_pr_buf);
+               return -ENOSPC;
+       }
+
+       /*
+        * Align the address of the entire CAIF frame (incl. padding),
+        * so the modem can do efficient DMA of this frame.
+        * FIXME: Alignment is a power of two, so binary ops could be used.
+        */
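+       /*
+        * For a power-of-two alignment, roundup() is equivalent to
+        * the cheaper (ofs + align - 1) & ~(align - 1).
+        */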
+       pbuf->frm_ofs = roundup(pbuf->frm_ofs, cfshm->tx_alignment);
+
+       /* Make the payload (IP packet) inside the frame aligned */
+       hdr_ofs = (unsigned long) &pbuf->frm_ofs;
+       hdr_ofs = roundup(hdr_ofs + SHM_HDR_LEN + info->hdr_len,
+                       cfshm->tx_alignment);
+
+       frm = (struct shm_caif_frm *)
+               (pbuf->addr + pbuf->frm_ofs);
+
+       frmlen = SHM_HDR_LEN + hdr_ofs + skb->len;
+
+       /*
+        * Verify that packet, header and additional padding
+        * can fit within the buffer frame area.
+        */
+       if (pbuf->len < pbuf->frm_ofs + frmlen) {
+               pr_devel("-ENOSPC exceeded offset %d < %d\n",
+                               pbuf->len, pbuf->frm_ofs + frmlen);
+               return -ENOSPC;
+       }
+
+       /* Copy in CAIF frame. */
+       frm->hdr_ofs = hdr_ofs;
+       skb_copy_bits(skb, 0, pbuf->addr +
+                       pbuf->frm_ofs + SHM_HDR_LEN +
+                       hdr_ofs, skb->len);
+
+       pr_devel("copy data buf:%d frm:%d offs:%d @%d len:%d\n",
+                       pbuf->index, pbuf->frames,
+                       pbuf->frm_ofs,
+                       (u32) (pbuf->addr + pbuf->frm_ofs +
+                               SHM_HDR_LEN + hdr_ofs - cfshm->tx_ringbuf),
+                       skb->len);
+
+       cfshm->ndev->stats.tx_packets++;
+       cfshm->ndev->stats.tx_bytes += frmlen;
+       /* Fill in the shared memory packet descriptor area. */
+       pck_desc = (struct shm_pck_desc *) (pbuf->addr);
+       /* Forward to current frame. */
+       pck_desc += pbuf->frames;
+       pck_desc->offset = pbuf->frm_ofs;
+       pck_desc->length = frmlen;
+       /* Terminate packet descriptor area. */
+       pck_desc++;
+       pck_desc->offset = 0;
+       pck_desc->length = 0;
+       /* Update buffer parameters. */
+       pbuf->frames++;
+       pbuf->frm_ofs += frmlen;
+
+       return 0;
+}
+
+static struct shmbuffer *queue_to_ringbuf(struct cfshm *cfshm, int *new_bufs)
+{
+       struct shmbuffer *pbuf;
+       struct sk_buff *skb;
+       int err;
+
+       WARN_ON(!spin_is_locked(&cfshm->lock));
+
+       pbuf = get_tx_buf(cfshm);
+       while (pbuf != NULL) {
+               skb = skb_peek(&cfshm->sk_qhead);
+               if (skb == NULL)
+                       break;
+               err = insert_skb_in_buf(cfshm, skb, pbuf);
+               if (unlikely(err == -ENOSPC)) {
+                       pr_devel("No more space in buffer\n");
+                       ++(*new_bufs);
+                       pbuf = tx_bump_buf(cfshm, pbuf);
+                       continue;
+               }
+               skb = skb_dequeue(&cfshm->sk_qhead);
+               /* We're always in NET_*_SOFTIRQ */
+               dev_kfree_skb(skb);
+       }
+       return pbuf;
+}
+
+static int shm_netdev_open(struct net_device *netdev)
+{
+       struct cfshm *cfshm = netdev_priv(netdev);
+       int ret, err = 0;
+
+       cfshm->state = CFSHM_OPENING;
+       if (cfshm->xshm != NULL && cfshm->xshm->open != NULL)
+               err = cfshm->xshm->open(cfshm->xshm);
+       if (err)
+               goto error;
+
+       rtnl_unlock();  /* Release RTNL lock during connect wait */
+       ret = wait_event_interruptible_timeout(cfshm->netmgmt_wq,
+                       cfshm->state != CFSHM_OPENING,
+                       CONNECT_TIMEOUT);
+       rtnl_lock();
+
+       if (ret == 0) {
+               pr_debug("connect timeout\n");
+               err = -ETIMEDOUT;
+               goto error;
+       }
+
+       if (cfshm->state !=  CFSHM_OPEN) {
+               pr_debug("connect failed\n");
+               err = -ECONNREFUSED;
+               goto error;
+       }
+
+       napi_enable(&cfshm->napi);
+       return 0;
+error:
+       if (cfshm->xshm != NULL && cfshm->xshm->close != NULL)
+               cfshm->xshm->close(cfshm->xshm);
+       return err;
+}
+
+static int shm_netdev_close(struct net_device *netdev)
+{
+       struct cfshm *cfshm = netdev_priv(netdev);
+
+       napi_disable(&cfshm->napi);
+
+       if (cfshm->xshm != NULL && cfshm->xshm->close != NULL)
+               cfshm->xshm->close(cfshm->xshm);
+
+       return 0;
+}
+
+static int open_cb(void *drv)
+{
+       struct cfshm *cfshm = drv;
+
+       cfshm->state = CFSHM_OPEN;
+       netif_carrier_on(cfshm->ndev);
+       wake_up_interruptible(&cfshm->netmgmt_wq);
+       return 0;
+}
+
+static void close_cb(void *drv)
+{
+       struct cfshm *cfshm = drv;
+
+       cfshm->state = CFSHM_CLOSED;
+       netif_carrier_off(cfshm->ndev);
+       wake_up_interruptible(&cfshm->netmgmt_wq);
+}
+
+static int caif_shmdrv_rx_cb(void *drv)
+{
+       struct cfshm *cfshm = drv;
+
+       if (unlikely(*cfshm->xshm->cfg.rx.state == cpu_to_le32(XSHM_CLOSED)))
+               return -ESHUTDOWN;
+
+       napi_schedule(&cfshm->napi);
+       return 0;
+}
+
+static int send_pending_txbufs(struct cfshm *cfshm, int usedbufs)
+{
+       unsigned long flags;
+
+       /* Send the started buffer if used buffers are low enough */
+       WARN_ON(!spin_is_locked(&cfshm->lock));
+       if (likely(usedbufs < cfshm->stuff_mark)) {
+               struct shmbuffer *pbuf = get_tx_buf(cfshm);
+               if (unlikely(pbuf->frames > 0)) {
+                       if (spin_trylock_irqsave(&cfshm->lock, flags)) {
+                               WARN_ON(!spin_is_locked(&cfshm->lock));
+                               pbuf = get_tx_buf(cfshm);
+                               tx_bump_buf(cfshm, pbuf);
+                               spin_unlock_irqrestore(&cfshm->lock, flags);
+                               cfshm->xshm->ipc_tx(cfshm->xshm);
+                               return 0;
+                       } else {
+                               return -EBUSY;
+                       }
+               }
+       }
+       return 0;
+}
+
+static int caif_shmdrv_tx_release_cb(void *drv)
+{
+       struct cfshm *cfshm = drv;
+       int usedbufs;
+
+       usedbufs = ringbuf_used(&cfshm->tx);
+
+       /* Send flow-on if we have sent flow-off and get below low-water */
+       if (usedbufs <= cfshm->low_xoff_water && !cfshm->tx_flow_on) {
+               pr_debug("Flow on\n");
+               cfshm->tx_flow_on = true;
+               cfshm->cfdev.flowctrl(cfshm->ndev, CAIF_FLOW_ON);
+       }
+
+       /* If ringbuf is full, schedule NAPI to start sending */
+       if (skb_peek(&cfshm->sk_qhead) != NULL) {
+               pr_debug("Schedule NAPI to empty queue\n");
+               napi_schedule(&cfshm->napi);
+               return 0;
+       }
+
+       /* Send the started buffer if used buffers are low enough */
+       if (usedbufs < cfshm->stuff_mark) {
+               struct shmbuffer *pbuf = get_tx_buf(cfshm);
+               if (pbuf != NULL && pbuf->frames > 0)
+                       napi_schedule(&cfshm->napi);
+       }
+       return 0;
+}
+
+static int shm_rx_poll(struct napi_struct *napi, int quota)
+{
+       struct cfshm *cfshm = container_of(napi, struct cfshm, napi);
+       int new_bufs;
+       struct shmbuffer *pbuf;
+       int usedbufs;
+       unsigned long flags;
+
+       /* Simply return if rx_poll is already called on other CPU */
+       if (atomic_read(&cfshm->dbg_smp_rxactive) > 0)
+               return quota;
+
+       WARN_ON(atomic_inc_return(&cfshm->dbg_smp_rxactive) > 1);
+
+       pbuf = shm_rx_func(cfshm, quota);
+
+       usedbufs = ringbuf_used(&cfshm->tx);
+
+       if (spin_trylock_irqsave(&cfshm->lock, flags)) {
+
+               /* Check if we're below "Stuff" limit, and send pending data */
+               send_pending_txbufs(cfshm, usedbufs);
+
+               /* Check if we have queued packets */
+               if (unlikely(skb_peek(&cfshm->sk_qhead) != NULL)) {
+                       struct shmbuffer *txbuf;
+                       WARN_ON(!spin_is_locked(&cfshm->lock));
+                       pr_debug("Try to empty tx-queue\n");
+                       new_bufs = 0;
+                       txbuf = queue_to_ringbuf(cfshm, &new_bufs);
+
+                       /* Bump out if we are configured with few buffers */
+                       if (txbuf && cfshm->xshm->cfg.tx.buffers < 3) {
+                               tx_bump_buf(cfshm, txbuf);
+
+                               spin_unlock_irqrestore(&cfshm->lock, flags);
+                               cfshm->xshm->ipc_tx(cfshm->xshm);
+                               goto txdone;
+                       }
+               }
+               spin_unlock_irqrestore(&cfshm->lock, flags);
+       }
+txdone:
+
+       if (pbuf == NULL)
+               napi_complete(&cfshm->napi);
+
+       atomic_dec(&cfshm->dbg_smp_rxactive);
+       return 0;
+}
+
+static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
+{
+       struct shmbuffer *pbuf = NULL;
+       int usedbufs;
+       int new_bufs = 0;
+       struct cfshm *cfshm = netdev_priv(shm_netdev);
+       unsigned long flags;
+
+       /*
+        * If we have packets in queue, keep queueing to avoid
+        * out-of-order delivery
+        */
+       spin_lock_irqsave(&cfshm->lock, flags);
+
+       skb_queue_tail(&cfshm->sk_qhead, skb);
+       pbuf = queue_to_ringbuf(cfshm, &new_bufs);
+
+       usedbufs = ringbuf_used(&cfshm->tx);
+
+       if (usedbufs > cfshm->high_xoff_water && cfshm->tx_flow_on) {
+               pr_debug("Flow off\n");
+               cfshm->tx_flow_on = false;
+               spin_unlock_irqrestore(&cfshm->lock, flags);
+               cfshm->cfdev.flowctrl(cfshm->ndev, CAIF_FLOW_OFF);
+               return 0;
+       }
+
+       /* Check if we should accumulate more packets */
+       if (new_bufs == 0 && usedbufs > cfshm->stuff_mark) {
+               spin_unlock_irqrestore(&cfshm->lock, flags);
+               return 0;
+       }
+       tx_bump_buf(cfshm, pbuf);
+       spin_unlock_irqrestore(&cfshm->lock, flags);
+       cfshm->xshm->ipc_tx(cfshm->xshm);
+       return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+       .ndo_open = shm_netdev_open,
+       .ndo_stop = shm_netdev_close,
+       .ndo_start_xmit = shm_netdev_tx,
+};
+
+static void shm_netdev_setup(struct net_device *pshm_netdev)
+{
+       struct cfshm *cfshm;
+
+       cfshm = netdev_priv(pshm_netdev);
+       pshm_netdev->netdev_ops = &netdev_ops;
+       pshm_netdev->type = ARPHRD_CAIF;
+       pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
+       pshm_netdev->tx_queue_len = 0;
+       pshm_netdev->destructor = free_netdev;
+
+       /* Initialize structures in a clean state. */
+       memset(cfshm, 0, sizeof(struct cfshm));
+}
+
+static void deinit_bufs(struct cfshm *cfshm)
+{
+       int j;
+
+       if (cfshm == NULL)
+               return;
+
+       /* The arrays may not have been allocated yet on the probe error path */
+       if (cfshm->rx_bufs) {
+               for (j = 0; j < cfshm->xshm->cfg.rx.buffers; j++)
+                       kfree(cfshm->rx_bufs[j]);
+               kfree(cfshm->rx_bufs);
+       }
+
+       if (cfshm->tx_bufs) {
+               for (j = 0; j < cfshm->xshm->cfg.tx.buffers; j++)
+                       kfree(cfshm->tx_bufs[j]);
+               kfree(cfshm->tx_bufs);
+       }
+}
+
+static int cfshm_probe(struct platform_device *pdev)
+{
+       int err, j;
+       struct xshm_dev *xshm = pdev->dev.platform_data;
+       struct cfshm *cfshm = NULL;
+       struct net_device *netdev;
+       u32 buf_size;
+       unsigned long flags;
+
+       if (xshm == NULL)
+               return -EINVAL;
+       if (xshm->cfg.tx.addr == NULL || xshm->cfg.rx.addr == NULL) {
+               pr_debug("Shared memory is not configured\n");
+               return -EINVAL;
+       }
+
+       if (xshm->cfg.tx.ch_size / xshm->cfg.tx.buffers <
+                       xshm->cfg.tx.packets * sizeof(struct shm_pck_desc) +
+                               xshm->cfg.tx.mtu) {
+               pr_warn("Bad packet TX-channel size");
+               return -EINVAL;
+       }
+
+       if (xshm->cfg.rx.ch_size / xshm->cfg.rx.buffers <
+                       sizeof(struct shm_pck_desc) + xshm->cfg.rx.mtu) {
+               pr_warn("Bad packet RX-channel size");
+               return -EINVAL;
+       }
+
+       if (xshm->cfg.rx.buffers < 2 || xshm->cfg.tx.buffers < 2) {
+               pr_warn("Too few buffers in channel");
+               return -EINVAL;
+       }
+
+       err = -ENOMEM;
+       netdev = alloc_netdev(sizeof(struct cfshm), xshm->cfg.name,
+                       shm_netdev_setup);
+
+       if (netdev == NULL)
+               goto error;
+
+       cfshm = netdev_priv(netdev);
+       cfshm->state = CFSHM_CLOSED;
+       init_waitqueue_head(&cfshm->netmgmt_wq);
+
+       cfshm->xshm = xshm;
+       xshm->driver_data = cfshm;
+       cfshm->ndev = netdev;
+       netdev->mtu = xshm->cfg.tx.mtu;
+       cfshm->high_xoff_water =
+               (xshm->cfg.rx.buffers * HIGH_XOFF_WATERMARK) / 100;
+       cfshm->low_xoff_water =
+               (xshm->cfg.rx.buffers * LOW_XOFF_WATERMARK) / 100;
+       cfshm->stuff_mark = (xshm->cfg.rx.buffers * STUFF_MARK) / 100;
+
+       cfshm->tx_frms_pr_buf = xshm->cfg.tx.packets;
+       cfshm->rx_frms_pr_buf = xshm->cfg.rx.packets;
+       cfshm->rx_alignment = xshm->cfg.rx.alignment;
+       cfshm->tx_alignment = xshm->cfg.tx.alignment;
+
+       if (xshm->cfg.latency)
+               cfshm->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
+       else
+               cfshm->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+
+       cfshm->tx.rip = xshm->cfg.tx.read;
+       cfshm->tx.wip = xshm->cfg.tx.write;
+       cfshm->tx.bufsize = xshm->cfg.tx.buf_size;
+       cfshm->tx.size = xshm->cfg.tx.buffers;
+
+       cfshm->rx.rip = xshm->cfg.rx.read;
+       cfshm->rx.wip = xshm->cfg.rx.write;
+       cfshm->rx.bufsize = xshm->cfg.rx.buf_size;
+       cfshm->rx.size = xshm->cfg.rx.buffers;
+       pr_devel("RX ri:%d wi:%d size:%d\n",
+               le32_to_cpu(*cfshm->rx.rip),
+                       le32_to_cpu(*cfshm->rx.wip), cfshm->rx.size);
+       pr_devel("TX ri:%d wi:%d size:%d\n",
+               le32_to_cpu(*cfshm->tx.rip),
+                       le32_to_cpu(*cfshm->tx.wip), cfshm->tx.size);
+       pr_devel("frms_pr_buf:%d %d\n", cfshm->rx_frms_pr_buf,
+                       cfshm->tx_frms_pr_buf);
+
+       spin_lock_init(&cfshm->lock);
+       netif_carrier_off(netdev);
+       skb_queue_head_init(&cfshm->sk_qhead);
+
+       pr_devel("SHM device[%p] probed by driver, new SHM driver instance at cfshm=0x%p\n",
+                       cfshm->xshm, cfshm);
+
+       cfshm->tx_ringbuf = xshm->cfg.tx.addr;
+       cfshm->rx_ringbuf = xshm->cfg.rx.addr;
+
+       pr_devel("TX-BASE:%p RX-BASE:%p\n",
+                       cfshm->tx_ringbuf,
+                       cfshm->rx_ringbuf);
+
+       cfshm->tx_bufs = kzalloc(sizeof(struct shmbuffer *) *
+                       xshm->cfg.tx.buffers, GFP_KERNEL);
+       if (cfshm->tx_bufs == NULL)
+               goto error;
+       buf_size = xshm->cfg.tx.ch_size / xshm->cfg.tx.buffers;
+
+       pr_devel("TX: buffers:%d buf_size:%d frms:%d mtu:%d\n",
+                       xshm->cfg.tx.buffers, buf_size,
+                       cfshm->tx_frms_pr_buf, netdev->mtu);
+
+       for (j = 0; j < xshm->cfg.tx.buffers; j++) {
+               u32 desc_size;
+               struct shmbuffer *tx_buf =
+                               kzalloc(sizeof(struct shmbuffer), GFP_KERNEL);
+
+               if (tx_buf == NULL) {
+                       pr_warn("Could not allocate memory for tx_buf, bailing out\n");
+                       goto error;
+               }
+
+               tx_buf->index = j;
+
+               tx_buf->addr = cfshm->tx_ringbuf + (buf_size * j);
+               tx_buf->len = buf_size;
+               tx_buf->frames = 0;
+               desc_size = (cfshm->tx_frms_pr_buf + 1) *
+                               sizeof(struct shm_pck_desc);
+
+               tx_buf->frm_ofs = desc_size + (desc_size % cfshm->tx_alignment);
+
+               cfshm->tx_bufs[j] = tx_buf;
+
+               pr_devel("tx_buf[%d] addr:%p len:%d\n",
+                               tx_buf->index,
+                               tx_buf->addr,
+                               tx_buf->len);
+       }
+
+       cfshm->rx_bufs = kzalloc(sizeof(struct shmbuffer *) *
+                               xshm->cfg.rx.buffers, GFP_KERNEL);
+       if (cfshm->rx_bufs == NULL)
+               goto error;
+       buf_size = xshm->cfg.rx.ch_size / xshm->cfg.rx.buffers;
+       pr_devel("RX: buffers:%d buf_size:%d frms:%d mtu:%d\n",
+                       xshm->cfg.rx.buffers, buf_size,
+                       cfshm->rx_frms_pr_buf, netdev->mtu);
+
+       for (j = 0; j < xshm->cfg.rx.buffers; j++) {
+               struct shmbuffer *rx_buf =
+                               kzalloc(sizeof(struct shmbuffer), GFP_KERNEL);
+
+               if (rx_buf == NULL) {
+                       pr_warn("Could not allocate memory for rx_buf, bailing out\n");
+                       goto error;
+               }
+
+               rx_buf->index = j;
+
+               rx_buf->addr = cfshm->rx_ringbuf + (buf_size * j);
+               rx_buf->len = buf_size;
+               cfshm->rx_bufs[j] = rx_buf;
+               pr_devel("rx_buf[%d] addr:%p len:%d\n",
+                               rx_buf->index,
+                               rx_buf->addr,
+                               rx_buf->len);
+       }
+
+       cfshm->tx_flow_on = 1;
+       cfshm->xshm->ipc_rx_cb = caif_shmdrv_rx_cb;
+       cfshm->xshm->ipc_tx_release_cb = caif_shmdrv_tx_release_cb;
+       cfshm->xshm->open_cb = open_cb;
+       cfshm->xshm->close_cb = close_cb;
+
+       spin_lock_irqsave(&cfshm->lock, flags);
+       get_tx_buf(cfshm);
+       new_rx_buf(cfshm);
+       spin_unlock_irqrestore(&cfshm->lock, flags);
+
+       netif_napi_add(netdev, &cfshm->napi, shm_rx_poll,
+                       2 * cfshm->rx_frms_pr_buf);
+
+       err = register_netdev(netdev);
+       if (err) {
+               pr_warn("Error %d: SHM could not register with the network framework, bailing out\n",
+                       err);
+               goto error;
+       }
+
+       /* Add CAIF SHM device to list. */
+       spin_lock(&cfshm_list_lock);
+       list_add_tail(&cfshm->node, &cfshm_list);
+       spin_unlock(&cfshm_list_lock);
+
+       return err;
+error:
+       deinit_bufs(cfshm);
+       if (netdev)
+               free_netdev(netdev);
+       return err;
+}
+
+static int cfshm_remove(struct platform_device *pdev)
+{
+       struct xshm_dev *xshm;
+       struct cfshm *cfshm;
+
+       xshm = pdev->dev.platform_data;
+
+       if (xshm == NULL || xshm->driver_data == NULL)
+               return 0;
+
+       cfshm = xshm->driver_data;
+
+       spin_lock(&cfshm_list_lock);
+       list_del(&cfshm->node);
+       spin_unlock(&cfshm_list_lock);
+
+       deinit_bufs(cfshm);
+
+       unregister_netdev(cfshm->ndev);
+
+       xshm->ipc_rx_cb = NULL;
+       xshm->ipc_tx_release_cb = NULL;
+       xshm->open_cb = NULL;
+       xshm->close_cb = NULL;
+       xshm->driver_data = NULL;
+
+       return 0;
+}
+
+static struct platform_driver cfshm_plat_drv = {
+       .probe = cfshm_probe,
+       .remove = cfshm_remove,
+       .driver = {
+               .name = "xshmp",
+               .owner = THIS_MODULE,
+       },
+};
+
+static void __exit cfshm_exit_module(void)
+{
+       platform_driver_unregister(&cfshm_plat_drv);
+}
+
+static int __init cfshm_init_module(void)
+{
+       int err;
+
+       spin_lock_init(&cfshm_list_lock);
+
+       err = platform_driver_register(&cfshm_plat_drv);
+       if (err)
+               pr_err("Could not register platform SHM driver: %d\n", err);
+
+       return err;
+}
+
+module_init(cfshm_init_module);
+module_exit(cfshm_exit_module);
diff --git a/drivers/xshm/Kconfig b/drivers/xshm/Kconfig
new file mode 100644
index 0000000..80daca4
--- /dev/null
@@ -0,0 +1,17 @@
+# XSHM gets selected by whoever wants it.
+config XSHM
+       depends on C2C
+       tristate
+
+config XSHM_CHR
+       tristate "Character device for External Shared Memory (XSHM)"
+       select XSHM
+       default n
+       ---help---
+       Say "Y" to use a character device for the External Shared
+       Memory (XSHM) IPC mechanism. XSHM is an IPC protocol used to
+       talk to an external device, such as a modem, over shared memory
+       (e.g. Chip to Chip).
+       Only say "M" here if you want to test XSHM and need to load
+       and unload its module.
+       If unsure say N.
diff --git a/drivers/xshm/Makefile b/drivers/xshm/Makefile
new file mode 100644
index 0000000..9670960
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_XSHM) += xshm.o
+xshm-objs := xshm_boot.o xshm_dev.o genio_dummy.o
+obj-$(CONFIG_XSHM_CHR) += xshm_chr.o
diff --git a/drivers/xshm/genio_dummy.c b/drivers/xshm/genio_dummy.c
new file mode 100644
index 0000000..d359ee6
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author:     Sjur Brendeland / sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+/* This is a dummy implementation of GENIO */
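+/*
+ * All operations succeed as no-ops, so the XSHM stack can be
+ * exercised on platforms without real C2C/GENIO hardware.
+ */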
+
+#include <linux/c2c_genio.h>
+int genio_subscribe(int bit, void (*bit_set_cb)(void *data), void *data)
+{
+       return 0;
+}
+
+int genio_unsubscribe(int bit)
+{
+       return 0;
+}
+
+int genio_set_bit(int bit)
+{
+       return 0;
+}
+
+int genio_init(void)
+{
+       return 0;
+}
+
+void genio_exit(void)
+{
+}
+
+int genio_reset(void)
+{
+       return 0;
+}
+
+int genio_subscribe_caif_ready(void (*caif_ready_cb) (bool ready))
+{
+       return 0;
+}
+
+int genio_set_shm_addr(u32 addr, void (*ipc_ready_cb) (void))
+{
+       return 0;
+}
+
+int genio_bit_alloc(u32 setter_mask, u32 getter_mask)
+{
+       return 0;
+}
+
+void genio_register_errhandler(void (*errhandler)(int errno))
+{
+}
+
+int genio_power_req(int state)
+{
+       return 0;
+}
diff --git a/drivers/xshm/xshm_boot.c b/drivers/xshm/xshm_boot.c
new file mode 100644
index 0000000..9870e0d
--- /dev/null
@@ -0,0 +1,1187 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author:     Sjur Brændeland / sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s :" fmt, __func__
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/crc-ccitt.h>
+#include <linux/kdev_t.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <linux/xshm/xshm_ipctoc.h>
+#include <linux/xshm/xshm_pdev.h>
+#include <linux/xshm/xshm_netlink.h>
+#include <linux/c2c_genio.h>
+
+#define XSHM_VERSION   0x1
+#define XSHM_SUBVER    0x0
+#define TOC_SZ         512
+#define IMG_MAX_SZ     65536
+#define XSHM_ALIGNMT   sizeof(u32)
+#define XSHM_MAX_CHANNELS 7
+#define XSHM_MIN_CHSZ 3
+#define XSHM_PAYL_ALIGN max(32, L1_CACHE_BYTES)
+
+#define GET_OFFSET(base, ptr) (((u8 *)(ptr)) - ((u8 *)(base)))
+#define OFFS2PTR(base, offs) ((void *) ((u8 *)base + offs))
+#define LEOFFS2PTR(base, offs) ((void *) ((u8 *)base + le32_to_cpu(offs)))
+
+/* Structure used in debug mode for integrity checking */
+struct ipctoc_hash {
+       u16 img_hash;
+       u16 ch_hash;
+       u16 ch_size;
+};
+
+static bool config_error;
+static bool commited;
+static bool registered;
+static bool addr_set;
+static u32 modem_bootimg_size;
+static void *shm_start;
+static u32 xshm_channels;
+static struct xshm_dev *xshmdevs[XSHM_MAX_CHANNELS + 1];
+static struct xshm_ipctoc *ipctoc;
+static struct device _parentdev;
+static struct device *parentdev;
+
+static unsigned long xshm_start;
+module_param(xshm_start, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(xshm_start, "Address for memory shared by host/modem.");
+
+static unsigned long xshm_c2c_bootaddr;
+module_param(xshm_c2c_bootaddr, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(xshm_c2c_bootaddr, "Address given to modem (through GENI register)");
+
+static long xshm_size;
+module_param(xshm_size, long, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(xshm_size, "Size of SHM area");
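+
+/*
+ * Example (hypothetical addresses/sizes; these depend entirely on the
+ * platform's shared-memory window):
+ *   insmod xshm.ko xshm_start=0x06000000 xshm_size=0x100000 \
+ *          xshm_c2c_bootaddr=0x06000000
+ */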
+
+#ifdef DEBUG
+
+/* In debug mode we pad around all payload area in order to detect overwrite */
+#define MAGIC_PAD_LEN 32
+#define MAGIC_PAD 0xbc
+
+/* Verify a magic-pad area */
+static inline bool padok(void *mag)
+{
+       u32 *p = mag, v = 0xbcbcbcbc;
+       int i;
+
+       for (i = 0; i < 8; i++)
+               if (*p++ != v)
+                       return false;
+       return true;
+}
+
+/* Insert a magic-pad area */
+static inline void add_magic_pad(u32 *offset, void *base)
+{
+       if (*offset < xshm_size)
+               memset(base + *offset, MAGIC_PAD, MAGIC_PAD_LEN);
+       *offset += MAGIC_PAD_LEN;
+}
+
+/* Abuse the pad area to create a checksum of the ipc-toc and descriptors */
+static inline void store_checksum(struct xshm_ipctoc *ipctoc, u32 size)
+{
+       struct ipctoc_hash *hash = (void *)ipctoc;
+       --hash;
+       hash->img_hash =
+               crc_ccitt(0xffff, (u8 *) shm_start, modem_bootimg_size);
+       hash->ch_hash = crc_ccitt(0xffff, (u8 *) ipctoc, size);
+       hash->ch_size = size;
+}
+
+/* Verify that the shm config has not been accidentally tampered with. */
+static inline bool ok_checksum(struct xshm_ipctoc *ipctoc)
+{
+       struct ipctoc_hash *hash = (void *) ipctoc;
+       u16 new_hash, new_imghash;
+       int i;
+       u8 *p;
+
+       if (!commited)
+               return false;
+
+       for (i = 0; i < xshm_channels; i++) {
+               struct xshm_ipctoc_channel *ch;
+
+               ch = LEOFFS2PTR(shm_start, ipctoc->channel_offsets[i].rx);
+               p = LEOFFS2PTR(shm_start, ch->ipc);
+               if (!padok(p - MAGIC_PAD_LEN))
+                       return false;
+               p = LEOFFS2PTR(shm_start, ch->offset);
+               if (!padok(p - MAGIC_PAD_LEN))
+                       return false;
+               ch = LEOFFS2PTR(shm_start, ipctoc->channel_offsets[i].tx);
+               p = LEOFFS2PTR(shm_start, ch->ipc);
+               if (!padok(p - MAGIC_PAD_LEN))
+                       return false;
+               p = LEOFFS2PTR(shm_start, ch->offset);
+               if (!padok(p - MAGIC_PAD_LEN))
+                       return false;
+       }
+
+       --hash;
+       new_hash = crc_ccitt(0xffff, (u8 *) ipctoc, hash->ch_size);
+       new_imghash =
+               crc_ccitt(0xffff, (u8 *) shm_start, modem_bootimg_size);
+       pr_debug("Hash result:size:%d chksm:%u/%u img:%u/%u\n",
+                       hash->ch_size, hash->ch_hash, new_hash,
+                       hash->img_hash, new_imghash);
+       return hash->ch_hash == new_hash && hash->img_hash == new_imghash;
+}
+
+static inline void init_data(u32 offset, int ch, u32 size)
+{
+       memset((u8 *)shm_start + offset, ch + 1, size);
+}
+#else
+#define MAGIC_PAD_LEN 0
+static inline void add_magic_pad(u32 *offset, void *base)
+{
+}
+static inline void store_checksum(void *ipctoc, u32 size)
+{
+}
+static inline bool ok_checksum(void *ipctoc)
+{
+       return true;
+}
+static inline void init_data(u32 offs, int ch, u32 size)
+{
+}
+#endif
+
+/* write_to_shm - Write SHM channel descriptors to SHM.
+ *
+ * Based on the configuration data, the channel configuration is
+ * written to the shared memory area.
+ * This is the data layout:
+ *
+ * +--------------+ <---- xshm_start
+ * | TOC          |
+ * +--------------+
+ * | Boot IMG     |
+ * +--------------+ <---- rw_start
+ * | RW Data      |
+ * +--------------+
+ * | RW Buf idx   |
+ * +--------------+ <---- ipctoc
+ * | IPC TOC      |
+ * +--------------+
+ * | RW Ch Descr  |
+ * +--------------+ <---- ro_start
+ * | RO Ch Descr  |
+ * +--------------+
+ * | RO Buf idx   |
+ * +--------------+
+ * | RO Data      |
+ * +--------------+
+ */
+
+static int write_to_shm(void)
+{
+       int i, pri, bitno;
+       u32 offset, ro_start, rw_start, ipctoc_offs, ipcro_offs;
+       bool found;
+       struct xshm_ipctoc_channel *ch;
+       struct toc_entry *toc_entry;
+       struct xshm_bufidx *bix;
+
+       /*
+        * Find where to put the IPC-TOC by adding up
+        * the size of the payload buffers plus buffer indices.
+        */
+       ipctoc_offs = ALIGN(modem_bootimg_size, XSHM_PAYL_ALIGN);
+       rw_start = ipctoc_offs;
+       for (i = 0; i < xshm_channels; i++) {
+               int n = xshmdevs[i]->cfg.tx.buffers;
+               ipctoc_offs += MAGIC_PAD_LEN;
+               ipctoc_offs += offsetof(struct xshm_bufidx, size[n + 2]);
+               ipctoc_offs = ALIGN(ipctoc_offs, XSHM_PAYL_ALIGN);
+               ipctoc_offs += MAGIC_PAD_LEN;
+               ipctoc_offs += xshmdevs[i]->cfg.tx.ch_size;
+               ipctoc_offs = ALIGN(ipctoc_offs, XSHM_PAYL_ALIGN);
+       }
+       add_magic_pad(&ipctoc_offs, shm_start);
+       pr_debug("IPC toc @ %08x\n", ipctoc_offs);
+
+       /*
+        * Allocate the IPC-TOC and initialize it.
+        * The IPC-TOC will be located after the RW data and
+        * buffer indices.
+        */
+       offset = ipctoc_offs;
+       ipctoc = OFFS2PTR(shm_start, ipctoc_offs);
+       ipctoc->magic[0] = XSHM_IPCTOC_MAGIC1;
+       ipctoc->magic[1] = XSHM_IPCTOC_MAGIC2;
+       ipctoc->version = XSHM_VERSION;
+       ipctoc->subver = XSHM_SUBVER;
+       memset(ipctoc->channel_offsets, 0, sizeof(ipctoc->channel_offsets));
+
+       /* Find start of first channel descriptor */
+       offset += sizeof(struct xshm_ipctoc);
+
+       /*
+        * Allocate the location for the RW Channel descriptors.
+        * It will be located after the IPC-TOC.
+        */
+       offset = ALIGN(offset, XSHM_ALIGNMT);
+       for (i = 0; i < xshm_channels; i++) {
+               pr_debug("Channel descriptor %d RW @ 0x%08x\n", i, offset);
+               ipctoc->channel_offsets[i].tx = cpu_to_le32(offset);
+               offset += sizeof(struct xshm_ipctoc_channel);
+               offset = ALIGN(offset, XSHM_ALIGNMT);
+               if (offset > xshm_size)
+                       goto badsize;
+       }
+       ro_start = offset;
+
+       /*
+        * Allocate the location for the RO Channel descriptors.
+        * It will be located after the RW Channels.
+        */
+       for (i = 0; i < xshm_channels; i++) {
+               pr_debug("Channel descriptor %d RO @ 0x%08x\n", i, offset);
+               ipctoc->channel_offsets[i].rx = cpu_to_le32(offset);
+               offset += sizeof(struct xshm_ipctoc_channel);
+               offset = ALIGN(offset, XSHM_ALIGNMT);
+               if (offset > xshm_size)
+                       goto badsize;
+       }
+
+       /*
+        * Allocate the location for the RO Buffer Indices.
+        * It will be located after the RO Channels.
+        */
+       offset = ALIGN(offset, XSHM_PAYL_ALIGN);
+       ipcro_offs = offset;
+       for (i = 0; i < xshm_channels; i++) {
+               int n = xshmdevs[i]->cfg.rx.buffers;
+               ch = LEOFFS2PTR(shm_start, ipctoc->channel_offsets[i].rx);
+               add_magic_pad(&offset, shm_start);
+               ch->ipc = cpu_to_le32(offset);
+
+               bix = OFFS2PTR(shm_start, offset);
+               bix->read_index = cpu_to_le32(0);
+               bix->write_index = cpu_to_le32(0);
+               bix->state = cpu_to_le32(XSHM_CLOSED);
+               bix->size[0] = cpu_to_le32(0);
+
+               pr_debug("IPC RO[%d] @: 0x%08x\n",  i, offset);
+               offset += offsetof(struct xshm_bufidx, size[n + 2]);
+               offset = ALIGN(offset, XSHM_PAYL_ALIGN);
+               if (offset > xshm_size)
+                       goto badsize;
+       }
+
+       /*
+        * Allocate the RO data area. This will be located after
+        * the RO buffer indices, at the end of the shared memory
+        * area.
+        */
+       offset = ALIGN(offset, XSHM_PAYL_ALIGN);
+       for (i = 0; i < xshm_channels; i++) {
+               u8 align;
+               u32 size;
+               ch = LEOFFS2PTR(shm_start, ipctoc->channel_offsets[i].rx);
+               add_magic_pad(&offset, shm_start);
+               ch->offset = cpu_to_le32(offset);
+
+               BUILD_BUG_ON(sizeof(ch->mode) != 1);
+               ch->mode = xshmdevs[i]->cfg.mode & XSHM_MODE_MASK;
+               ch->buffers = cpu_to_le32(xshmdevs[i]->cfg.rx.buffers);
+               align = rounddown_pow_of_two(xshmdevs[i]->cfg.rx.alignment);
+               ch->alignment = align;
+               ch->packets = xshmdevs[i]->cfg.rx.packets;
+               ch->mtu = xshmdevs[i]->cfg.rx.mtu;
+               size = xshmdevs[i]->cfg.rx.ch_size;
+               if (xshmdevs[i]->cfg.mode & XSHM_PACKET_MODE) {
+                       u32 buf_size;
+                       buf_size = size / xshmdevs[i]->cfg.rx.buffers;
+                       buf_size = rounddown(buf_size, align);
+                       size = buf_size * xshmdevs[i]->cfg.rx.buffers;
+               }
+               pr_debug("Buffer area RO for Channel[%d] at: 0x%08x size:%d\n",
+                               i, offset, size);
+               ch->size = cpu_to_le32(size);
+
+               init_data(offset, i, xshmdevs[i]->cfg.rx.ch_size);
+               offset += xshmdevs[i]->cfg.rx.ch_size;
+               offset = ALIGN(offset, XSHM_PAYL_ALIGN);
+               if (offset > xshm_size)
+                       goto badsize;
+       }
+
+       /*
+        * Allocate the RW data area. This will be located at the
+        * beginning, just after the modem boot image.
+        */
+       offset = rw_start;
+       for (i = 0; i < xshm_channels; i++) {
+               u8 align;
+               u32 size;
+               ch = LEOFFS2PTR(shm_start, ipctoc->channel_offsets[i].tx);
+               add_magic_pad(&offset, shm_start);
+               ch->offset = cpu_to_le32(offset);
+               init_data(offset, i, xshmdevs[i]->cfg.tx.ch_size);
+               ch->mode = xshmdevs[i]->cfg.mode &
+                               XSHM_MODE_MASK;
+               ch->buffers = cpu_to_le32(xshmdevs[i]->cfg.tx.buffers);
+               align = rounddown_pow_of_two(xshmdevs[i]->cfg.tx.alignment);
+               ch->alignment = align;
+               ch->packets = xshmdevs[i]->cfg.tx.packets;
+               ch->mtu = xshmdevs[i]->cfg.tx.mtu;
+               size = xshmdevs[i]->cfg.tx.ch_size;
+               if (xshmdevs[i]->cfg.mode & XSHM_PACKET_MODE) {
+                       u32 buf_size;
+                       buf_size = size / xshmdevs[i]->cfg.tx.buffers;
+                       buf_size = rounddown(buf_size, align);
+                       size = buf_size * xshmdevs[i]->cfg.tx.buffers;
+               }
+               ch->size = cpu_to_le32(size);
+               pr_debug("Buffer area RW for Channel[%d] at: 0x%08x size:%d\n",
+                               i, offset, size);
+               offset += xshmdevs[i]->cfg.tx.ch_size;
+               offset = ALIGN(offset, XSHM_PAYL_ALIGN);
+               if (offset > ro_start)
+                       goto badsize;
+       }
+
+       /*
+        * Allocate the RW IPC area. This will be located after the
+        * RW data area, just before the IPC-TOC.
+        */
+       offset = ALIGN(offset, XSHM_PAYL_ALIGN);
+       for (i = 0; i < xshm_channels; i++) {
+               int n = xshmdevs[i]->cfg.tx.buffers;
+               ch = LEOFFS2PTR(shm_start, ipctoc->channel_offsets[i].tx);
+               add_magic_pad(&offset, shm_start);
+               ch->ipc = cpu_to_le32(offset);
+               bix = OFFS2PTR(shm_start, offset);
+               bix->read_index = cpu_to_le32(0);
+               bix->write_index = cpu_to_le32(0);
+               bix->state = cpu_to_le32(XSHM_CLOSED);
+               bix->size[0] = cpu_to_le32(0);
+
+               pr_debug("IPC RW[%d] @: 0x%08x\n",  i, offset);
+               offset += offsetof(struct xshm_bufidx, size[n + 2]);
+               offset = ALIGN(offset, XSHM_PAYL_ALIGN);
+               if (offset > xshm_size)
+                       goto badsize;
+       }
+
+       /* Allocate genio bits for each channel according to priority */
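+       /*
+        * Each channel consumes four consecutive bits: write and read
+        * bits for both directions. Channels are scanned in order of
+        * their priority value (0 first), so the lowest bit numbers go
+        * to the channels with the lowest priority value.
+        */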
+       bitno = 0;
+       for (pri = 0; pri < 8; pri++) {
+               for (i = 0; i < xshm_channels; i++) {
+                       if (xshmdevs[i]->cfg.priority == pri) {
+                               ch = LEOFFS2PTR(shm_start,
+                                               ipctoc->channel_offsets[i].tx);
+                               ch->write_bit = cpu_to_le16(bitno * 4);
+                               ch->read_bit = cpu_to_le16(bitno * 4 + 2);
+                               ch = LEOFFS2PTR(shm_start,
+                                               ipctoc->channel_offsets[i].rx);
+                               ch->write_bit = cpu_to_le16(bitno * 4 + 1);
+                               ch->read_bit = cpu_to_le16(bitno * 4 + 3);
+                               bitno++;
+                       }
+               }
+       }
+
+       /*
+        * The master TOC points out the boot images for the modem.
+        * Use the first available entry in the TOC to write the
+        * pointer to the IPC-TOC defined above.
+        */
+       found = false;
+       for (toc_entry = shm_start, i = 0; i < 16; i++, toc_entry++)
+               if (toc_entry->start == cpu_to_le32(0xffffffff)) {
+                       pr_debug("IPCTOC address written into Master TOC"
+                                       " @ 0x%08x\n", i * 32);
+                       toc_entry->start =
+                               cpu_to_le32(GET_OFFSET(shm_start, ipctoc));
+                       toc_entry->size = cpu_to_le32(0);
+                       toc_entry->flags = cpu_to_le32(0);
+                       toc_entry->entry_point = cpu_to_le32(0);
+                       toc_entry->load_addr = cpu_to_le32(0xffffffff);
+                       memset(toc_entry->name, 0, sizeof(toc_entry->name));
+                       sprintf(toc_entry->name, "ipc-toc");
+                       found = true;
+                       break;
+               }
+       if (!found) {
+               pr_debug("Cannot insert IPC-TOC in toc\n");
+               goto bad_config;
+       }
+
+       store_checksum(ipctoc, ipcro_offs - ipctoc_offs);
+
+       return 0;
+
+badsize:
+       pr_debug("IPCTOC: not enough space (size:0x%lx offset:0x%x)\n",
+                       xshm_size, offset);
+       return -ENOSPC;
+
+bad_config:
+       pr_debug("IPCTOC bad configuration data\n");
+       return -EINVAL;
+}
+
+static int xshm_verify_config(struct xshm_channel *xcfg)
+{
+       int j;
+
+       if ((xcfg->mode & XSHM_MODE_MASK) != XSHM_PACKET_MODE &&
+                       (xcfg->mode & XSHM_MODE_MASK) != XSHM_STREAM_MODE) {
+               pr_debug("Bad config: channel mode must be set\n");
+               return -EINVAL;
+       }
+       if (xcfg->mode & XSHM_PACKET_MODE && xcfg->rx.buffers < 2) {
+               pr_debug("Bad config: minimum 2 buffers must be set for packet mode\n");
+               return -EINVAL;
+       }
+
+       if (xcfg->rx.ch_size < XSHM_MIN_CHSZ) {
+               pr_debug("Bad config: channel size must be larger than %d\n",
+                               XSHM_MIN_CHSZ);
+               return -EINVAL;
+       }
+
+       if (xcfg->mode & XSHM_PACKET_MODE) {
+               if (xcfg->tx.buffers < 2) {
+                       pr_debug("Bad config: minimum 2 buffers must be set in packet mode\n");
+                       return -EINVAL;
+               }
+               if (xcfg->tx.packets < 1) {
+                       pr_debug("Bad config: packets must be set for packet mode\n");
+                       return -EINVAL;
+               }
+       }
+
+       if (xcfg->tx.ch_size < XSHM_MIN_CHSZ) {
+               pr_debug("Bad config: channel size must be larger than %d\n",
+                               XSHM_MIN_CHSZ);
+               return -EINVAL;
+       }
+
+       if (xcfg->name[0] == '\0') {
+               pr_debug("Channel must be named\n");
+               return -EINVAL;
+       }
+       for (j = 0; j < xshm_channels; j++) {
+               struct xshm_channel *xcfg2 = &xshmdevs[j]->cfg;
+               if (xcfg != xcfg2 && strcmp(xcfg->name, xcfg2->name) == 0) {
+                       pr_debug("Channels have the same name: %s\n",
+                                        xcfg->name);
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+static int verify_config(void)
+{
+       int i;
+
+       if (xshm_channels == 0) {
+               pr_debug("Bad config: at least one channel must be defined\n");
+               return -EINVAL;
+       }
+       for (i = 0; i < xshm_channels; i++) {
+               int err = xshm_verify_config(&xshmdevs[i]->cfg);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+/*
+ * Create Configuration data for the platform devices.
+ */
+static void create_devs(void)
+{
+       int i;
+
+       for (i = 0; i < xshm_channels; i++) {
+               struct xshm_bufidx *buf_rx, *buf_tx;
+               struct xshm_ipctoc_channel *ch_rx, *ch_tx;
+               struct xshm_channel *xcfg = &xshmdevs[i]->cfg;
+               ch_rx = LEOFFS2PTR(shm_start,
+                               ipctoc->channel_offsets[i].rx);
+               buf_rx = LEOFFS2PTR(shm_start, ch_rx->ipc);
+               ch_tx = LEOFFS2PTR(shm_start,
+                               ipctoc->channel_offsets[i].tx);
+               buf_tx = LEOFFS2PTR(shm_start, ch_tx->ipc);
+
+               /*
+                * Due to restricted read-only access
+                * we swap positions for read/write
+                * pointers.
+                */
+               xcfg->tx.write = &buf_tx->write_index;
+               xcfg->tx.read = &buf_rx->read_index;
+
+               xcfg->rx.write = &buf_rx->write_index;
+               xcfg->rx.read = &buf_tx->read_index;
+
+               xcfg->rx.addr = LEOFFS2PTR(shm_start, ch_rx->offset);
+               xcfg->tx.addr = LEOFFS2PTR(shm_start, ch_tx->offset);
+               xcfg->rx.state = &buf_rx->state;
+               xcfg->tx.state = &buf_tx->state;
+               xcfg->tx.buf_size = buf_tx->size;
+               xcfg->rx.buf_size = buf_rx->size;
+
+               xcfg->rx.xfer_bit = le16_to_cpu(ch_rx->write_bit);
+               xcfg->tx.xfer_bit = le16_to_cpu(ch_tx->write_bit);
+               xcfg->rx.xfer_done_bit = le16_to_cpu(ch_rx->read_bit);
+               xcfg->tx.xfer_done_bit = le16_to_cpu(ch_tx->read_bit);
+
+               if (xcfg->mode & XSHM_PAIR_MODE) {
+                       struct xshm_channel *pair;
+                       pr_debug("Channel[%d] is in PAIR mode\n", i);
+                       if (i < 1) {
+                               pr_debug("No channel to pair with\n");
+                               continue;
+                       }
+                       /* Cross couple rx/tx on the pair */
+                       pair = &xshmdevs[i - 1]->cfg;
+
+                       /* Copy everything but the kobj which is at the end */
+                       memcpy(&xcfg->tx, &pair->rx,
+                                       offsetof(struct xshm_udchannel, kobj));
+                       memcpy(&xcfg->rx, &pair->tx,
+                                       offsetof(struct xshm_udchannel, kobj));
+               } else if (xcfg->mode & XSHM_LOOP_MODE) {
+                       pr_debug("Channel[%d] is in LOOP mode\n", i);
+                       /*
+                        * Connect rx/tx in a pair. Copy everything,
+                        * but the kobj which is at the end
+                        */
+                       memcpy(&xcfg->tx, &xcfg->rx,
+                                       offsetof(struct xshm_udchannel, kobj));
+               }
+
+               pr_devel("RX[%d] ri:%p wi:%p\n", i, xcfg->rx.read,
+                               xcfg->rx.write);
+               pr_devel("TX[%d] ri:%p wi:%p\n", i, xcfg->tx.read,
+                               xcfg->tx.write);
+       }
+}
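+
+/*
+ * The cross-coupling above means each side only writes into the half
+ * of shared memory it owns: the local side advances the write index in
+ * its own TX area but reads the matching read index from the remote RX
+ * area, and vice versa. In PAIR mode channel[i] simply takes over
+ * channel[i - 1]'s descriptors with rx and tx swapped, so the two
+ * entries form one bidirectional channel.
+ */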
+
+static int do_commit(void)
+{
+       int err;
+
+       if (config_error) {
+               pr_devel("config error detected\n");
+               return -EINVAL;
+       }
+
+       if (commited) {
+               pr_devel("already committed\n");
+               config_error = true;
+               return -EINVAL;
+       }
+       err = verify_config();
+       if (err) {
+               pr_devel("bad config\n");
+               config_error = true;
+               return err;
+       }
+       err = write_to_shm();
+       if (err) {
+               pr_devel("write to SHM failed\n");
+               config_error = true;
+               return err;
+       }
+       commited = true;
+       create_devs();
+       return 0;
+}
+
+static int do_register(void)
+{
+       int i, err;
+
+       if (!commited || registered || config_error) {
+               pr_devel("bad sequence of requests\n");
+               config_error = true;
+               return -EINVAL;
+       }
+
+       err = verify_config();
+       if (err) {
+               config_error = true;
+               pr_devel("bad config\n");
+               return err;
+       }
+       registered = true;
+
+       for (i = 0; i < xshm_channels; i++)
+               xshm_register_dev(xshmdevs[i]);
+
+       return 0;
+}
+
+static void do_reset(void)
+{
+       xshm_reset();
+       config_error = false;
+       ready_for_ipc = false;
+       ready_for_caif = false;
+       registered = false;
+       commited = false;
+       addr_set = false;
+       modem_bootimg_size = TOC_SZ;
+       xshm_channels = 0;
+}
+
+static int do_set_addr(void)
+{
+       int err;
+       if (!commited || addr_set || config_error) {
+               pr_devel("bad sequence of requests\n");
+               config_error = true;
+               return -EINVAL;
+       }
+       err = verify_config();
+       if (err) {
+               config_error = true;
+               pr_devel("bad config\n");
+               return err;
+       }
+       addr_set = true;
+       return genio_set_shm_addr(xshm_c2c_bootaddr, genio_ipc_ready_cb);
+}
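+
+/*
+ * Note on sequencing: the do_*() handlers above enforce a strict order
+ * through the commited/addr_set/registered flags - channels are added
+ * first, do_commit() then verifies the configuration and writes it to
+ * shared memory, and only after that may do_set_addr() and
+ * do_register() run. do_reset() returns everything to the initial
+ * state so the sequence can start over.
+ */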
+
+static void parent_release(struct device *dev)
+{
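+       /* parentdev is statically allocated; nothing to free on release */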
+}
+
+static int copy_name(const char *src, char *d, size_t count)
+{
+       const char *s, *end = src + count;
+       for (s = src; *s && s < end; s++, d++)
+               if (*s == '\0' || *s == '\n')
+                       break;
+               else if (!isalnum(*s)) {
+                       pr_debug("Illegal chr:'%c' in name:'%s'\n", *s, src);
+                       return -EINVAL;
+               } else if (s - src >= XSHM_NAMESZ - 1) {
+                       pr_debug("Name '%s' too long\n", src);
+                       return -EINVAL;
+               } else
+                       *d = *s;
+       *d = '\0';
+
+       return count;
+}
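+
+/*
+ * Example: copy_name("modem0\n", d, 7) stores "modem0" and returns 7,
+ * while a name containing a non-alphanumeric character (e.g. "xshm-0")
+ * or one longer than XSHM_NAMESZ - 1 characters gives -EINVAL.
+ */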
+
+static inline struct xshm_dev *get_dev2xshm(struct device *dev)
+{
+       struct platform_device *pdev;
+       struct xshm_dev *xshmdev;
+       pdev = container_of(dev, struct platform_device, dev);
+       xshmdev = container_of(pdev, struct xshm_dev, pdev);
+       return xshmdev;
+}
+
+static void xshmdev_release(struct device *dev)
+{
+       struct xshm_dev *xshm = get_dev2xshm(dev);
+       kfree(xshm);
+}
+
+/* sysfs: Read the modem firmware (actually the whole shared memory area) */
+static ssize_t modemfw_read(struct file *file, struct kobject *kobj,
+                       struct bin_attribute *attr,
+                       char *buf, loff_t off, size_t count)
+{
+#ifdef DEBUG
+       /* Reading the shm area is useful for debugging */
+       if (off > xshm_size)
+               return 0;
+       if (off + count > xshm_size)
+               count = xshm_size - off;
+       memcpy(buf, shm_start + off, count);
+       return count;
+#else
+       return -EINVAL;
+#endif
+}
+
+/* sysfs: Write the modem firmware including TOC */
+static ssize_t modemfw_write(struct file *f, struct kobject *kobj,
+                       struct bin_attribute *attr,
+                       char *buf, loff_t off, size_t count)
+{
+       if (commited)
+               return -EBUSY;
+
+       if (off + count > xshm_size)
+               return -ENOSPC;
+       memcpy(shm_start + off, buf, count);
+       modem_bootimg_size = off + count;
+       return count;
+}
+
+/* sysfs: Modem firmware attribute */
+static struct bin_attribute modemfw_attr = {
+       .attr = {
+                .name = "bootimg",
+                .mode = S_IRUGO | S_IWUSR,
+                },
+       .size = IMG_MAX_SZ,
+       .read = modemfw_read,
+       .write = modemfw_write
+};
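+
+/*
+ * User space loads the modem boot image by writing it to the "bootimg"
+ * attribute of the parent "xshm" device under sysfs before the
+ * configuration is committed; once commited is set, writes fail with
+ * -EBUSY. Reads of the area are only served in DEBUG builds.
+ */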
+
+/* sysfs: ipc_ready file */
+static ssize_t ipc_ready_show(struct device *dev, struct device_attribute *attr,
+                               char *buf)
+{
+       return sprintf(buf, "%d\n", ready_for_ipc);
+}
+
+/* sysfs: caif_ready file */
+static ssize_t caif_ready_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d\n", ready_for_caif);
+}
+
+static DEVICE_ATTR(ipc_ready, S_IRUGO, ipc_ready_show, NULL);
+static DEVICE_ATTR(caif_ready, S_IRUGO, caif_ready_show, NULL);
+
+/* sysfs: notification on change of ipc_ready to user space */
+void xshm_ipc_ready(void)
+{
+       sysfs_notify(&parentdev->kobj, NULL, dev_attr_ipc_ready.attr.name);
+}
+
+/* sysfs: notification on change of caif_ready to user space */
+void xshm_caif_ready(void)
+{
+       sysfs_notify(&parentdev->kobj, NULL, dev_attr_caif_ready.attr.name);
+}
+
+/* XSHM Generic NETLINK family */
+static struct genl_family xshm_gnl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = 0,
+       .name = "XSHM",
+       .version = XSHM_PROTO_VERSION,
+       .maxattr = XSHM_A_MAX,
+};
+
+/* XSHM Netlink attribute policy */
+static const struct nla_policy xshm_genl_policy[XSHM_A_MAX + 1] = {
+       [XSHM_A_VERSION] = { .type = NLA_U8 },
+       [XSHM_A_SUB_VERSION] = { .type = NLA_U8 },
+       [__XSHM_A_FLAGS] = { .type = NLA_U32 },
+       [XSHM_A_NAME] = { .type = NLA_NUL_STRING, .len = XSHM_NAMESZ},
+       [XSHM_A_RX_CHANNEL] = { .type = NLA_NESTED },
+       [XSHM_A_TX_CHANNEL] = { .type = NLA_NESTED },
+       [XSHM_A_PRIORITY] = { .type = NLA_U8 },
+       [XSHM_A_LATENCY] = { .type = NLA_U8 },
+};
+
+/* Policy for uni-directional attributes for stream */
+static const struct nla_policy stream_policy[XSHM_A_MAX + 1] = {
+       [XSHM_A_CHANNEL_SIZE] = { .type = NLA_U32 },
+};
+
+/* Policy for uni-directional attributes for packet */
+static const struct nla_policy packet_policy[XSHM_A_MAX + 1] = {
+       [XSHM_A_CHANNEL_SIZE] = { .type = NLA_U32 },
+       [XSHM_A_CHANNEL_BUFFERS] = { .type = NLA_U32 },
+       [XSHM_A_MTU] = { .type = NLA_U16 },
+       [XSHM_A_ALIGNMENT] = { .type = NLA_U8 },
+       [XSHM_A_PACKETS] = { .type = NLA_U8 },
+};
+
+static int xshm_add_udchannel(struct xshm_udchannel *chn, int attr,
+                       struct genl_info *info, struct nla_policy const *policy)
+{
+       struct nlattr *nla;
+       int nla_rem;
+
+       if (!info->attrs[attr])
+               return -EINVAL;
+
+       if (nla_validate_nested(info->attrs[attr],
+                                       XSHM_A_MAX,
+                                       policy) != 0) {
+               pr_info("Invalid channel attributes\n");
+               return -EINVAL;
+       }
+
+       nla_for_each_nested(nla, info->attrs[attr], nla_rem) {
+
+               if (nla_type(nla) == XSHM_A_CHANNEL_SIZE)
+                       chn->ch_size = nla_get_u32(nla);
+
+               if (nla_type(nla) == XSHM_A_CHANNEL_BUFFERS)
+                       chn->buffers = nla_get_u32(nla);
+
+               if (nla_type(nla) == XSHM_A_MTU)
+                       chn->mtu = nla_get_u16(nla);
+
+               if (nla_type(nla) == XSHM_A_PACKETS)
+                       chn->packets = nla_get_u8(nla);
+
+               if (nla_type(nla) == XSHM_A_ALIGNMENT) {
+                       chn->alignment = nla_get_u8(nla);
+                       chn->alignment = rounddown_pow_of_two(chn->alignment);
+               }
+
+       }
+       return 0;
+}
+
+static int xshm_add_channel(struct xshm_channel *cfg, struct genl_info *info,
+                       int mode)
+{
+       int len, err;
+       struct nla_policy const *policy;
+       char name[XSHM_NAMESZ];
+
+       policy = (mode == XSHM_PACKET_MODE) ? packet_policy : stream_policy;
+
+       if (info->attrs[XSHM_A_VERSION]) {
+               u8 version;
+               u8 sub_version;
+
+               version = nla_get_u8(info->attrs[XSHM_A_VERSION]);
+               if (!info->attrs[XSHM_A_SUB_VERSION])
+                       return -EINVAL;
+               sub_version = nla_get_u8(info->attrs[XSHM_A_SUB_VERSION]);
+               if (version != 1 || sub_version != 0) {
+                       pr_info("Bad version or sub_version\n");
+                       return -EINVAL;
+               }
+       }
+
+       if (!info->attrs[XSHM_A_NAME]) {
+               pr_debug("Name not specified\n");
+               return -EINVAL;
+       }
+
+       len = nla_strlcpy(name, info->attrs[XSHM_A_NAME],
+                       XSHM_NAMESZ);
+
+       if (len > XSHM_NAMESZ)
+               return -EINVAL;
+
+       err = copy_name(name, cfg->name, sizeof(name));
+       if (err < 0)
+               return err;
+
+       cfg->excl_group = 1;
+       if (info->attrs[XSHM_A_EXCL_GROUP])
+               cfg->excl_group = nla_get_u8(info->attrs[XSHM_A_EXCL_GROUP]);
+
+       err = xshm_add_udchannel(&cfg->rx, XSHM_A_RX_CHANNEL, info, policy);
+
+       if (err)
+               return err;
+       err = xshm_add_udchannel(&cfg->tx, XSHM_A_TX_CHANNEL, info, policy);
+
+       if (err)
+               return err;
+
+       if (info->attrs[XSHM_A_PRIORITY]) {
+               cfg->priority = nla_get_u8(info->attrs[XSHM_A_PRIORITY]);
+               /* silently fixup bad value */
+               if (cfg->priority > 7)
+                       cfg->priority = 0;
+       }
+
+       if (info->attrs[XSHM_A_LATENCY])
+               cfg->latency = nla_get_u8(info->attrs[XSHM_A_LATENCY]);
+
+       if (info->attrs[__XSHM_A_FLAGS])
+               cfg->mode |= nla_get_u32(info->attrs[__XSHM_A_FLAGS]);
+
+       return 0;
+}
+
+static int do_reply(struct genl_info *info, int result)
+{
+       struct sk_buff *msg;
+       int err;
+       void *reply;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (msg == NULL)
+               return -ENOMEM;
+
+       reply = genlmsg_put_reply(msg, info, &xshm_gnl_family, 0, result);
+       if (reply == NULL) {
+               kfree_skb(msg);
+               return -EMSGSIZE;
+       }
+
+       genlmsg_end(msg, reply);
+       err = genlmsg_reply(msg, info);
+       return err;
+}
+
+static int xshm_add_ch(struct sk_buff *skb, struct genl_info *info, int mode)
+{
+       int err;
+       struct xshm_channel cfg;
+       struct xshm_dev *xshmdev;
+
+       if (xshm_channels >= XSHM_MAX_CHANNELS) {
+               pr_debug("Too many channels added\n");
+               return -EINVAL;
+       }
+
+       memset(&cfg, 0, sizeof(cfg));
+       cfg.mode = mode;
+       err = xshm_add_channel(&cfg, info, mode);
+       if (err)
+               return err;
+
+       xshmdev = kzalloc(sizeof(*xshmdev), GFP_KERNEL);
+       if (xshmdev == NULL)
+               return -ENOMEM;
+
+       if (mode == XSHM_PACKET_MODE)
+               xshmdev->pdev.name = "xshmp";
+       else
+               xshmdev->pdev.name = "xshms";
+
+       xshmdev->cfg = cfg;
+       xshmdev->pdev.id = xshm_channels;
+       xshmdev->pdev.dev.parent = parentdev;
+       xshmdev->pdev.dev.release = xshmdev_release;
+       xshmdevs[xshm_channels] = xshmdev;
+
+       ++xshm_channels;
+
+       err = xshm_verify_config(&xshmdev->cfg);
+       if (err)
+               goto error;
+       err = do_reply(info, 0);
+       if (err)
+               goto error;
+       return err;
+
+error:
+       --xshm_channels;
+       kfree(xshmdev);
+       return err;
+}
+
+static int xshm_add_packet_ch(struct sk_buff *skb, struct genl_info *info)
+{
+       return xshm_add_ch(skb, info, XSHM_PACKET_MODE);
+}
+
+static int xshm_add_stream_ch(struct sk_buff *skb, struct genl_info *info)
+{
+       return xshm_add_ch(skb, info, XSHM_STREAM_MODE);
+}
+
+static int xshm_c_commit(struct sk_buff *skb, struct genl_info *info)
+{
+       int err = do_commit();
+       if (!err)
+               do_reply(info, 0);
+       return err;
+}
+
+static int xshm_c_register(struct sk_buff *skb, struct genl_info *info)
+{
+       int err = do_register();
+       if (!err)
+               do_reply(info, 0);
+       return err;
+}
+
+static int xshm_c_set_addr(struct sk_buff *skb, struct genl_info *info)
+{
+       int err = do_set_addr();
+       if (!err)
+               do_reply(info, 0);
+       return err;
+}
+
+static int xshm_c_reset(struct sk_buff *skb, struct genl_info *info)
+{
+       do_reset();
+       do_reply(info, 0);
+       return 0;
+}
+
+static int xshm_c_verify(struct sk_buff *skb, struct genl_info *info)
+{
+       int err = verify_config();
+       if (!err)
+               do_reply(info, 0);
+       return err;
+}
+
+static struct genl_ops xshm_genl_ops[] = {
+       {
+       .cmd = XSHM_C_ADD_STREAM_CHANNEL,
+       .flags = GENL_ADMIN_PERM,
+       .policy = xshm_genl_policy,
+       .doit = xshm_add_stream_ch,
+       .dumpit = NULL,
+       },
+       {
+       .cmd = XSHM_C_ADD_PACKET_CHANNEL,
+       .flags = GENL_ADMIN_PERM,
+       .policy = xshm_genl_policy,
+       .doit = xshm_add_packet_ch,
+       .dumpit = NULL,
+       },
+       {
+       .cmd = XSHM_C_COMMIT,
+       .flags = GENL_ADMIN_PERM,
+       .doit = xshm_c_commit,
+       .dumpit = NULL,
+       },
+       {
+       .cmd = XSHM_C_REGISTER,
+       .flags = GENL_ADMIN_PERM,
+       .doit = xshm_c_register,
+       .dumpit = NULL,
+       },
+       {
+       .cmd = XSHM_C_SET_ADDR,
+       .flags = GENL_ADMIN_PERM,
+       .doit = xshm_c_set_addr,
+       .dumpit = NULL,
+       },
+       {
+       .cmd = XSHM_C_RESET,
+       .flags = GENL_ADMIN_PERM,
+       .doit = xshm_c_reset,
+       .dumpit = NULL,
+       },
+       {
+       .cmd = __XSHM_C_VERIFY,
+       .flags = GENL_ADMIN_PERM,
+       .doit = xshm_c_verify,
+       .dumpit = NULL,
+       },
+};
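+
+/*
+ * Minimal user-space sketch (assuming libnl-3; error handling omitted)
+ * of driving this family - resolve "XSHM" by name and issue a bare
+ * XSHM_C_COMMIT request:
+ *
+ *     struct nl_sock *sk = nl_socket_alloc();
+ *     struct nl_msg *msg = nlmsg_alloc();
+ *     int family;
+ *
+ *     genl_connect(sk);
+ *     family = genl_ctrl_resolve(sk, "XSHM");
+ *     genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
+ *                 XSHM_C_COMMIT, XSHM_PROTO_VERSION);
+ *     nl_send_auto(sk, msg);
+ *     nl_recvmsgs_default(sk);
+ *     nlmsg_free(msg);
+ *     nl_socket_free(sk);
+ */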
+
+static bool gennetl_reg;
+
+/* Initialize boot handling and create sysfs entries*/
+int xshm_boot_init(void)
+{
+       int err = -EINVAL;
+       bool xshm_fake = false;
+
+       /* Negative xshm_size indicates module test without real SHM */
+       if (xshm_size < 0) {
+               xshm_fake = true;
+               xshm_size = abs(xshm_size);
+       }
+
+       if (xshm_size < TOC_SZ)
+               goto bad_config;
+
+       if (xshm_fake) {
+               shm_start = kzalloc(xshm_size, GFP_KERNEL);
+               err = -ENOMEM;
+               if (!shm_start)
+                       goto error_nodev;
+               xshm_start = (unsigned long) shm_start;
+               memset(shm_start, 0xaa, xshm_size);
+       } else {
+               if (xshm_start == 0)
+                       goto bad_config;
+               shm_start = ioremap(xshm_start, xshm_size);
+               err = -ENOMEM;
+               if (!shm_start)
+                       goto error_nodev;
+       }
+
+       /* Initiate the Master TOC to 0xff for the first 512 bytes */
+       if (xshm_size >= TOC_SZ)
+               memset(shm_start, 0xff, TOC_SZ);
+
+       modem_bootimg_size = TOC_SZ;
+
+       pr_debug("Boot image addr: %p size:%d\n", shm_start,
+                       modem_bootimg_size);
+
+       parentdev = &_parentdev;
+       memset(parentdev, 0, sizeof(*parentdev));
+       dev_set_name(parentdev, "xshm");
+       parentdev->release = parent_release;
+       err = device_register(parentdev);
+       if (err)
+               goto error_nodev;
+
+       err = device_create_bin_file(parentdev, &modemfw_attr);
+       if (err)
+               goto error;
+       err = device_create_file(parentdev, &dev_attr_ipc_ready);
+       if (err)
+               goto error;
+       err = device_create_file(parentdev, &dev_attr_caif_ready);
+       if (err)
+               goto error;
+
+       err = genl_register_family_with_ops(&xshm_gnl_family,
+               xshm_genl_ops, ARRAY_SIZE(xshm_genl_ops));
+       if (err)
+               goto error;
+
+       gennetl_reg = true;
+       return err;
+error:
+       pr_debug("initialization failed\n");
+       device_unregister(parentdev);
+
+error_nodev:
+       if (xshm_fake)
+               kfree(shm_start);
+       else if (shm_start)
+               iounmap(shm_start);
+       return err;
+bad_config:
+       pr_err("Bad module configuration:"
+                       " xshm_base_address:%lu xshm_size:%lu err:%d\n",
+                       xshm_start, xshm_size, err);
+       /* A built-in module should not return an error */
+       return -EINVAL;
+}
+
+void xshm_boot_exit(void)
+{
+       device_unregister(parentdev);
+
+       if (gennetl_reg)
+               genl_unregister_family(&xshm_gnl_family);
+       gennetl_reg = false;
+}
diff --git a/drivers/xshm/xshm_chr.c b/drivers/xshm/xshm_chr.c
new file mode 100644 (file)
index 0000000..8593367
--- /dev/null
@@ -0,0 +1,1269 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author:     Per Sigmond / Per.Sigmond@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s :" fmt, __func__
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/xshm/xshm_pdev.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+
+MODULE_LICENSE("GPL");
+static LIST_HEAD(xshmchr_chrdev_list);
+static DEFINE_SPINLOCK(list_lock);
+
+#define xdev_dbg(dev, fmt, arg...) printk(KERN_DEBUG "%s: %s - " fmt, \
+                       dev ? dev->misc.name : "?", __func__, ##arg)
+#define xdev_devl(dev, fmt, arg...) printk(KERN_DEBUG "%s: %s - " fmt, \
+                       dev ? dev->misc.name : "?" , __func__, ##arg)
+#define pr_xchrstate(dev, str) \
+       xdev_devl(dev, "State: %s %s %s\n", str,        \
+               STATE_IS_PENDING(dev) ? "pending" : "", \
+               STATE_IS_OPEN(dev) ? "open" : "close")
+
+#define OPEN_TOUT                      (25 * HZ)
+#define CONN_STATE_OPEN_BIT            0
+#define CONN_STATE_PENDING_BIT         1
+#define CONN_REMOTE_TEARDOWN_BIT       2
+#define CONN_EOF_BIT                   4
+
+#define STATE_IS_OPEN(dev) test_bit(CONN_STATE_OPEN_BIT, \
+                                       (void *) &(dev)->conn_state)
+#define STATE_IS_REMOTE_TEARDOWN(dev) test_bit(CONN_REMOTE_TEARDOWN_BIT, \
+                                       (void *) &(dev)->conn_state)
+#define STATE_IS_PENDING(dev) test_bit(CONN_STATE_PENDING_BIT, \
+                                       (void *) &(dev)->conn_state)
+#define SET_STATE_OPEN(dev) (set_bit(CONN_STATE_OPEN_BIT,      \
+                       (void *) &(dev)->conn_state), \
+                       pr_devel("SET_STATE_OPEN:%d\n", dev->conn_state))
+#define SET_STATE_CLOSED(dev) (clear_bit(CONN_STATE_OPEN_BIT,  \
+                       (void *) &(dev)->conn_state), \
+                       pr_devel("SET_STATE_CLOSED:%d\n", dev->conn_state))
+#define SET_PENDING_ON(dev) (set_bit(CONN_STATE_PENDING_BIT,   \
+                       (void *) &(dev)->conn_state), \
+                       pr_devel("SET_PENDING_ON:%d\n", dev->conn_state))
+#define SET_PENDING_OFF(dev) (clear_bit(CONN_STATE_PENDING_BIT, \
+                       (void *) &(dev)->conn_state), \
+                       pr_devel("SET_PENDING_OFF:%d\n", dev->conn_state))
+#define SET_REMOTE_TEARDOWN(dev) (set_bit(CONN_REMOTE_TEARDOWN_BIT,    \
+                       (void *) &(dev)->conn_state), \
+                       pr_devel("SET_REMOTE_TEARDOWN:%d\n", dev->conn_state))
+#define CLEAR_REMOTE_TEARDOWN(dev) (clear_bit(CONN_REMOTE_TEARDOWN_BIT, \
+                       (void *) &(dev)->conn_state), \
+                       pr_devel("CLEAR_REMOTE_TEARDOWN:%d\n", dev->conn_state))
+#define SET_EOF(dev) (set_bit(CONN_EOF_BIT,    \
+                       (void *) &(dev)->conn_state), \
+                       pr_devel("SET_EOF:%d\n", dev->conn_state))
+#define CLEAR_EOF(dev) (clear_bit(CONN_EOF_BIT, \
+                       (void *) &(dev)->conn_state), \
+                       pr_devel("CLEAR_EOF:%d\n", dev->conn_state))
+#define STATE_IS_EOF(dev) test_bit(CONN_EOF_BIT, \
+                                       (void *) &(dev)->conn_state)
+
+#define CHR_READ_FLAG 0x01
+#define CHR_WRITE_FLAG 0x02
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+static struct dentry *debugfsdir;
+#define        dbfs_atomic_inc(a) atomic_inc(a)
+#define        dbfs_atomic_add(v, a) atomic_add_return(v, a)
+#else
+#define        dbfs_atomic_inc(a) 0
+#define        dbfs_atomic_add(v, a) 0
+#endif
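+
+/*
+ * Without CONFIG_DEBUG_FS the dbfs_* wrappers above evaluate to a
+ * constant 0, so the statistics updates in the hot paths compile to
+ * nothing.
+ */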
+
+struct ringbuf {
+       __le32 *ri;     /* Pointer to read-index in shared memory.*/
+       __le32 *wi;     /* Pointer to write-index in shared memory */
+       unsigned int size;/* Size of buffer */
+       void *data;     /* Buffer data in shared memory */
+};
+
+struct xshmchr_char_dev {
+       struct xshm_dev *xshm;
+       struct kref kref;
+       struct ringbuf rx, tx;
+       u32 conn_state;
+       char name[256];
+       struct miscdevice misc;
+       int file_mode;
+
+       /* Access to this struct and below layers */
+       struct mutex mutex;
+       wait_queue_head_t mgmt_wq;
+       /* List of misc test devices */
+       struct list_head list_field;
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *debugfs_device_dir;
+       atomic_t num_open;
+       atomic_t num_close;
+       atomic_t num_read;
+       atomic_t num_read_block;
+       atomic_t num_read_bytes;
+
+       atomic_t num_write;
+       atomic_t num_write_block;
+       atomic_t num_write_bytes;
+
+       atomic_t num_init;
+       atomic_t num_init_resp;
+       atomic_t num_deinit;
+       atomic_t num_deinit_resp;
+       atomic_t num_remote_teardown_ind;
+
+#endif
+};
+
+static void xshm_release(struct kref *kref)
+{
+       struct xshmchr_char_dev *dev;
+       dev = container_of(kref, struct xshmchr_char_dev, kref);
+       xdev_devl(dev, "Freeing device\n");
+       kfree(dev);
+}
+
+static void xshmchr_get(struct xshmchr_char_dev *dev)
+{
+       kref_get(&dev->kref);
+}
+
+static void xshmchr_put(struct xshmchr_char_dev *dev)
+{
+       kref_put(&dev->kref, xshm_release);
+}
+
+static inline unsigned int ringbuf_empty(struct ringbuf *rb)
+{
+       return *rb->wi == *rb->ri;
+}
+
+static inline unsigned int ringbuf_full(struct ringbuf *rb)
+{
+       return (le32_to_cpu(*rb->wi) + 1) % rb->size == le32_to_cpu(*rb->ri);
+}
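+
+/*
+ * One slot is always kept free, so wi == ri means empty and
+ * (wi + 1) % size == ri means full; a ring of size N can hold at most
+ * N - 1 bytes.
+ */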
+
+static int insert_ringbuf(struct ringbuf *rb, const char __user *from,
+               u32 len)
+{
+       u32 wi = le32_to_cpu(*rb->wi);
+       u32 ri = le32_to_cpu(*rb->ri);
+       u32 cpylen, cpylen2 = 0, notcpy;
+
+       pr_devel("insert: wi:%d ri:%d len:%d\n", wi, ri, len);
+       if (wi >= ri) {
+               len = min(len, rb->size - 1 - wi + ri);
+               cpylen = min(rb->size, wi + len) - wi;
+
+               /* Write is ahead of read, copy 'cpylen' data from 'wi' */
+               notcpy = copy_from_user(rb->data + wi, from, cpylen);
+               if (cpylen > 0 && notcpy == cpylen)
+                       return -EIO;
+
+               if (cpylen < len && notcpy == 0) {
+                       cpylen2 = min(ri - 1 , len - cpylen);
+
+                       /* We have wrapped copy 'cpylen2' from start */
+                       notcpy = copy_from_user(rb->data, from + cpylen,
+                                       cpylen2);
+               }
+       } else {
+               cpylen = min(ri - 1 - wi , len);
+
+               /* Read is ahead of write, copy from wi to (ri - 1) */
+               notcpy = copy_from_user(rb->data + wi, from, cpylen);
+               if (cpylen > 0 && notcpy == cpylen)
+                       return -EIO;
+
+       }
+       /* Do write barrier before updating index */
+       smp_wmb();
+       *rb->wi = cpu_to_le32((wi + cpylen + cpylen2 - notcpy) % rb->size);
+       pr_devel("write ringbuf: wi: %d->%d l:%d\n",
+                       wi, le32_to_cpu(*rb->wi), cpylen + cpylen2);
+       return cpylen + cpylen2 - notcpy;
+}
+
+static int extract_ringbuf(struct ringbuf *rb, void __user *to, u32 len)
+{
+       u32 wi = le32_to_cpu(*rb->wi);
+       u32 ri = le32_to_cpu(*rb->ri);
+       u32 cpylen = 0, cpylen2 = 0, notcpy;
+
+       pr_devel("extract: wi:%d ri:%d len:%d\n", wi, ri, len);
+       if (ri <= wi) {
+               len = min(wi - ri, len);
+
+               /* Read is ahead of write, copy 'len' data from 'ri' */
+               notcpy = copy_to_user(to, rb->data + ri, len);
+               if (len > 0 && notcpy == len)
+                       return -EIO;
+
+               /* Do write barrier before updating index */
+               smp_wmb();
+               *rb->ri = cpu_to_le32(ri + len - notcpy);
+               pr_devel("read ringbuf: ri: %d->%d len:%d\n",
+                       ri, le32_to_cpu(*rb->ri), len - notcpy);
+
+               return len - notcpy;
+       } else {
+               /* wi < ri: the write index has wrapped */
+               cpylen = min(rb->size - ri, len);
+
+               /* Write is ahead, copy 'cpylen' data from ri until end */
+               notcpy = copy_to_user(to, rb->data + ri, cpylen);
+               if (cpylen > 0 && notcpy == cpylen)
+                       return -EIO;
+               if (cpylen < len && notcpy == 0) {
+                       cpylen2 = min(wi , len - cpylen);
+                       /* we have wrapped copy from [0 .. cpylen2] */
+                       notcpy = copy_to_user(to + cpylen, rb->data, cpylen2);
+               }
+               /* Do write barrier before updating index */
+               smp_wmb();
+
+               *rb->ri = cpu_to_le32((ri + cpylen + cpylen2 - notcpy)
+                                               % rb->size);
+               pr_devel("read ringbuf: ri: %d->%d cpylen:%d\n",
+                       ri, le32_to_cpu(*rb->ri), cpylen + cpylen2 - notcpy);
+
+               return cpylen + cpylen2 - notcpy;
+       }
+}
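+
+/*
+ * Wrap-around example for the two routines above (size 8, ri = 6,
+ * wi = 2, len = 5): extract_ringbuf() first copies
+ * min(size - ri, len) = 2 bytes from data[6..7], then
+ * min(wi, len - 2) = 2 bytes from data[0..1], and ri advances to
+ * (6 + 4) % 8 = 2 == wi, leaving the ring empty.
+ */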
+
+static void drain_ringbuf(struct xshmchr_char_dev *dev)
+{
+       /* Empty the ringbuf. */
+       *dev->xshm->cfg.rx.read = *dev->xshm->cfg.rx.write;
+       *dev->xshm->cfg.tx.write = *dev->xshm->cfg.tx.read;
+}
+
+static int open_cb(void *drv)
+{
+       struct xshmchr_char_dev *dev = drv;
+
+       pr_xchrstate(dev, "enter");
+       dbfs_atomic_inc(&dev->num_init_resp);
+       /* Signal reader that data is available. */
+       WARN_ON(!STATE_IS_OPEN(dev));
+       SET_PENDING_OFF(dev);
+       wake_up_interruptible_all(&dev->mgmt_wq);
+       pr_xchrstate(dev, "exit");
+       return 0;
+}
+
+static void close_cb(void *drv)
+{
+       struct xshmchr_char_dev *dev = drv;
+
+       pr_xchrstate(dev, "enter");
+       dbfs_atomic_inc(&dev->num_remote_teardown_ind);
+       if (STATE_IS_PENDING(dev) && !STATE_IS_OPEN(dev)) {
+               /* Normal close sequence */
+               SET_PENDING_OFF(dev);
+               CLEAR_REMOTE_TEARDOWN(dev);
+               SET_EOF(dev);
+               drain_ringbuf(dev);
+               dev->file_mode = 0;
+       } else {
+               /* Remote teardown, close should be called from user-space */
+               SET_REMOTE_TEARDOWN(dev);
+               SET_PENDING_OFF(dev);
+       }
+
+       wake_up_interruptible_all(&dev->mgmt_wq);
+       pr_xchrstate(dev, "exit");
+}
+
+static int ipc_rx_cb(void *drv)
+{
+       struct xshmchr_char_dev *dev = drv;
+
+       xdev_devl(dev, "Enter\n");
+
+       if (unlikely(*dev->xshm->cfg.rx.state == cpu_to_le32(XSHM_CLOSED)))
+               return -ESHUTDOWN;
+
+       /*
+        * Performance could perhaps be improved by having a WAIT
+        * flag, similar to SOCK_ASYNC_WAITDATA, and only do wake up
+        * when it's actually needed.
+        */
+       wake_up_interruptible_all(&dev->mgmt_wq);
+       return 0;
+}
+
+static int ipc_tx_release_cb(void *drv)
+{
+       struct xshmchr_char_dev *dev = drv;
+
+       xdev_devl(dev, "Enter\n");
+       wake_up_interruptible_all(&dev->mgmt_wq);
+       return 0;
+}
+
+/* Device Read function called from Linux kernel */
+static ssize_t xshmchr_chrread(struct file *filp, char __user *buf,
+        size_t count, loff_t *f_pos)
+{
+       int len = 0;
+       int result;
+       struct xshmchr_char_dev *dev = filp->private_data;
+       ssize_t ret = -EIO;
+
+       if (dev == NULL) {
+               xdev_dbg(dev, "private_data not set!\n");
+               return -EBADFD;
+       }
+
+       /* I want to be alone on dev (except status and queue) */
+       if (mutex_lock_interruptible(&dev->mutex)) {
+               xdev_dbg(dev, "mutex_lock_interruptible got signalled\n");
+               return -ERESTARTSYS;
+       }
+       xshmchr_get(dev);
+
+       if (!STATE_IS_OPEN(dev)) {
+               /* Device is closed or closing. */
+               if (!STATE_IS_PENDING(dev)) {
+                       xdev_dbg(dev, "device is closed (by remote)\n");
+                       ret = -ECONNRESET;
+               } else {
+                       xdev_dbg(dev, "device is closing...\n");
+                       ret = -EBADF;
+               }
+               goto read_error;
+       }
+
+       dbfs_atomic_inc(&dev->num_read);
+
+       /* Device is open or opening. */
+       if (STATE_IS_PENDING(dev)) {
+               xdev_devl(dev, "device is opening...\n");
+
+               dbfs_atomic_inc(&dev->num_read_block);
+               if (filp->f_flags & O_NONBLOCK) {
+                       /* We can't block. */
+                       xdev_dbg(dev, "exit: state pending and O_NONBLOCK\n");
+                       ret = -EAGAIN;
+                       goto read_error;
+               }
+
+               /*
+                * To reach here client must do blocking open,
+                * and start read() before open completes. This is
+                * quite quirky, but let's handle it anyway.
+                */
+               result =
+                   wait_event_interruptible(dev->mgmt_wq,
+                                       !STATE_IS_PENDING(dev) ||
+                                   STATE_IS_REMOTE_TEARDOWN(dev));
+
+               if (result == -ERESTARTSYS) {
+                       xdev_dbg(dev, "wait_event_interruptible"
+                                " woken by a signal (1)\n");
+                       ret = -ERESTARTSYS;
+                       goto read_error;
+               }
+               if (STATE_IS_REMOTE_TEARDOWN(dev)) {
+                       xdev_dbg(dev, "received remote_shutdown indication (1)\n");
+                       ret = -ESHUTDOWN;
+                       goto read_error;
+               }
+       }
+
+       /* Block if we don't have any received buffers.
+        * The queue has its own lock.
+        */
+       while (ringbuf_empty(&dev->rx)) {
+
+               if (filp->f_flags & O_NONBLOCK) {
+                       xdev_devl(dev, "exit: O_NONBLOCK\n");
+                       ret = -EAGAIN;
+                       goto read_error;
+               }
+
+               /* Let writers in. */
+               mutex_unlock(&dev->mutex);
+
+               xdev_devl(dev, "%s:wait for data\n", dev->name);
+               /* Block reader until data arrives or device is closed. */
+               if (wait_event_interruptible(dev->mgmt_wq,
+                               !ringbuf_empty(&dev->rx)
+                               || STATE_IS_REMOTE_TEARDOWN(dev)
+                               || !STATE_IS_OPEN(dev)) == -ERESTARTSYS) {
+                       xdev_devl(dev, "event_interruptible woken by "
+                                "a signal, signal_pending(current) = %d\n",
+                               signal_pending(current));
+                       ret = -ERESTARTSYS;
+                       goto error_nolock;
+               }
+
+               xdev_devl(dev, "%s:wakeup readq\n", dev->name);
+
+               if (STATE_IS_REMOTE_TEARDOWN(dev) && ringbuf_empty(&dev->rx)) {
+                       if (!STATE_IS_EOF(dev)) {
+                               xdev_dbg(dev, "First EOF OK\n");
+                               SET_EOF(dev);
+                               ret = 0;
+                               goto error_nolock;
+                       }
+                       xdev_dbg(dev, "2nd EOF - remote_shutdown\n");
+                       ret = -ECONNRESET;
+                       goto error_nolock;
+               }
+
+               /* I want to be alone on dev (except status and queue). */
+               if (mutex_lock_interruptible(&dev->mutex)) {
+                       xdev_dbg(dev, "mutex_lock_interruptible"
+                                       " got signalled\n");
+                       ret = -ERESTARTSYS;
+                       goto error_nolock;
+               }
+
+               if (!STATE_IS_OPEN(dev)) {
+                       /* Someone closed the link, report error. */
+                       xdev_dbg(dev, "remote end shutdown!\n");
+                       ret = -EBADF;
+                       goto read_error;
+               }
+       }
+
+       xdev_devl(dev, "%s:copy data\n", dev->name);
+       len = extract_ringbuf(&dev->rx, buf, count);
+       if (len <= 0) {
+               xdev_dbg(dev, "Extracting from ringbuf failed\n");
+               ret = -EINVAL;
+               goto read_error;
+       }
+
+       /* Signal to modem that data is read from ringbuf */
+
+       dev->xshm->ipc_rx_release(dev->xshm, false);
+
+       dbfs_atomic_add(len, &dev->num_read_bytes);
+       /* Let the others in. */
+       mutex_unlock(&dev->mutex);
+       xshmchr_put(dev);
+       return len;
+
+read_error:
+       mutex_unlock(&dev->mutex);
+error_nolock:
+       xshmchr_put(dev);
+       return ret;
+}
+
+/* Device write function called from Linux kernel (misc device) */
+static ssize_t xshmchr_chrwrite(struct file *filp, const char __user *buf,
+                     size_t count, loff_t *f_pos)
+{
+       struct xshmchr_char_dev *dev = filp->private_data;
+       ssize_t ret = -EIO;
+       int result;
+       int len = 0;
+
+       if (dev == NULL) {
+               xdev_dbg(dev, "private_data not set!\n");
+               return -EBADFD;
+       }
+
+       pr_xchrstate(dev, "Enter");
+
+       /* I want to be alone on dev (except status and queue). */
+       if (mutex_lock_interruptible(&dev->mutex)) {
+               xdev_dbg(dev, "mutex_lock_interruptible got signalled\n");
+               return -ERESTARTSYS;
+       }
+       xshmchr_get(dev);
+
+       dbfs_atomic_inc(&dev->num_write);
+       if (!STATE_IS_OPEN(dev)) {
+               /* Device is closed or closing. */
+               if (!STATE_IS_PENDING(dev)) {
+                       xdev_dbg(dev, "device is closed (by remote)\n");
+                       ret = -EPIPE;
+               } else {
+                       xdev_dbg(dev, "device is closing...\n");
+                       ret = -EBADF;
+               }
+               goto write_error;
+       }
+
+       /* Device is open or opening. */
+       if (STATE_IS_PENDING(dev)) {
+               xdev_dbg(dev, "device is opening...\n");
+
+               dbfs_atomic_inc(&dev->num_write_block);
+               if (filp->f_flags & O_NONBLOCK) {
+                       /* We can't block */
+                       xdev_dbg(dev, "exit: state pending and O_NONBLOCK\n");
+                       ret = -EAGAIN;
+                       goto write_error;
+               }
+
+               /* Blocking mode; state is pending and we need to wait
+                * for its conclusion. (Shutdown_ind set pending off.)
+                */
+               result =
+                   wait_event_interruptible(dev->mgmt_wq,
+                                       !STATE_IS_PENDING(dev) ||
+                                       STATE_IS_REMOTE_TEARDOWN(dev));
+               if (result == -ERESTARTSYS) {
+                       xdev_dbg(dev, "wait_event_interruptible"
+                                " woken by a signal (1)\n");
+                       ret = -ERESTARTSYS;
+                       goto write_error;
+               }
+       }
+       if (STATE_IS_REMOTE_TEARDOWN(dev)) {
+               xdev_dbg(dev, "received remote_shutdown indication\n");
+               ret = -EPIPE;
+               goto write_error;
+       }
+
+       while (ringbuf_full(&dev->tx)) {
+               /* Flow is off. Check non-block flag. */
+               if (filp->f_flags & O_NONBLOCK) {
+                       xdev_dbg(dev, "exit: O_NONBLOCK and tx flow off");
+                       ret = -EAGAIN;
+                       goto write_error;
+               }
+
+               /* Let readers in. */
+               mutex_unlock(&dev->mutex);
+
+               xdev_devl(dev, "wait for write space\n");
+               /* Wait until flow is on or device is closed. */
+               if (wait_event_interruptible(dev->mgmt_wq,
+                                       !ringbuf_full(&dev->tx)
+                                       || !STATE_IS_OPEN(dev)
+                                       || STATE_IS_REMOTE_TEARDOWN(dev)
+                                       ) == -ERESTARTSYS) {
+                       xdev_dbg(dev, "wait_event_interruptible"
+                                " woken by a signal (1)\n");
+                       ret = -ERESTARTSYS;
+                       goto write_error_no_unlock;
+               }
+
+               /* I want to be alone on dev (except status and queue). */
+               if (mutex_lock_interruptible(&dev->mutex)) {
+                       xdev_dbg(dev, "mutex_lock_interruptible "
+                                       "got signalled\n");
+                       ret = -ERESTARTSYS;
+                       goto write_error_no_unlock;
+               }
+
+               xdev_devl(dev, "wakeup got write space\n");
+               if (!STATE_IS_OPEN(dev)) {
+                       /* Someone closed the link, report error. */
+                       xdev_dbg(dev, "remote end shutdown!\n");
+                       ret = -EPIPE;
+                       goto write_error;
+               }
+               if (STATE_IS_REMOTE_TEARDOWN(dev)) {
+                       xdev_dbg(dev, "received remote_shutdown indication\n");
+                       ret = -ESHUTDOWN;
+                       goto write_error;
+               }
+       }
+       len = insert_ringbuf(&dev->tx, buf, count);
+       xdev_devl(dev, "inserted %d bytes\n", len);
+       if (len <= 0) {
+               xdev_dbg(dev, "transmit failed, error = %d\n", len);
+               if (len < 0)
+                       ret = len;
+               goto write_error;
+       }
+
+       dbfs_atomic_add(len, &dev->num_write_bytes);
+
+       /* Signal to modem that data is put in ringbuf */
+       dev->xshm->ipc_tx(dev->xshm);
+
+       mutex_unlock(&dev->mutex);
+       xshmchr_put(dev);
+       return len;
+
+write_error:
+       mutex_unlock(&dev->mutex);
+write_error_no_unlock:
+       xshmchr_put(dev);
+       return ret;
+}
+
+static unsigned int xshmchr_chrpoll(struct file *filp, poll_table *waittab)
+{
+       struct xshmchr_char_dev *dev = filp->private_data;
+       unsigned int mask = 0;
+
+       if (dev == NULL) {
+               xdev_dbg(dev, "private_data not set!\n");
+               return POLLERR;
+       }
+
+       /* I want to be alone on dev (except status and queue). */
+       if (mutex_lock_interruptible(&dev->mutex)) {
+               xdev_dbg(dev, "mutex_lock_interruptible got signalled\n");
+               return POLLERR;
+       }
+       xshmchr_get(dev);
+
+       if (STATE_IS_REMOTE_TEARDOWN(dev)) {
+               xdev_dbg(dev, "not open\n");
+               mask |= POLLRDHUP | POLLHUP;
+               goto out;
+       }
+
+       xdev_devl(dev, "%s: poll wait\n", dev->name);
+       poll_wait(filp, &dev->mgmt_wq, waittab);
+
+       if (STATE_IS_OPEN(dev) && STATE_IS_PENDING(dev))
+               goto out;
+
+       if (!ringbuf_empty(&dev->rx))
+               mask |= (POLLIN | POLLRDNORM);
+
+       if (!ringbuf_full(&dev->tx))
+               mask |= (POLLOUT | POLLWRNORM);
+
+out:
+       mutex_unlock(&dev->mutex);
+       xdev_devl(dev, "poll return mask=0x%04x\n", mask);
+       xshmchr_put(dev);
+       return mask;
+}
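+
+/*
+ * Sketch of typical user-space use of the resulting misc device (the
+ * node is named after the channel; "/dev/modem0" below is only an
+ * illustration):
+ *
+ *     int fd = open("/dev/modem0", O_RDWR);
+ *     struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ *     if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *             n = read(fd, buf, sizeof(buf));
+ */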
+
+/* Usage:
+ * minor >= 0 : find from minor
+ * minor < 0 and name == name : find from name
+ * minor < 0 and name == NULL : get first
+ */
+
+static struct xshmchr_char_dev *find_device(int minor, char *name,
+                                        int remove_from_list)
+{
+       struct list_head *list_node;
+       struct list_head *n;
+       struct xshmchr_char_dev *dev = NULL;
+       struct xshmchr_char_dev *tmp;
+       spin_lock(&list_lock);
+       xdev_devl(dev, "start looping\n");
+       list_for_each_safe(list_node, n, &xshmchr_chrdev_list) {
+               tmp = list_entry(list_node, struct xshmchr_char_dev,
+                               list_field);
+               if (minor >= 0) {       /* find from minor */
+                       if (tmp->misc.minor == minor)
+                               dev = tmp;
+
+               } else if (name) {      /* find from name */
+                       if (!strncmp(tmp->name, name, sizeof(tmp->name)))
+                               dev = tmp;
+               } else {        /* take first */
+                       dev = tmp;
+               }
+
+               if (dev) {
+                       xdev_devl(dev, "match %d, %s\n",
+                                     minor, name);
+                       if (remove_from_list)
+                               list_del(list_node);
+                       break;
+               }
+       }
+       spin_unlock(&list_lock);
+       return dev;
+}
+
+static int xshmchr_chropen(struct inode *inode, struct file *filp)
+{
+       struct xshmchr_char_dev *dev = NULL;
+       int result = -1;
+       int minor = iminor(inode);
+       int mode = 0;
+       int ret = -EIO;
+
+       dev = find_device(minor, NULL, 0);
+       pr_xchrstate(dev, "ENTER");
+
+       if (dev == NULL) {
+               xdev_dbg(dev, "Could not find device\n");
+               return -EBADF;
+       }
+
+       /* I want to be alone on dev (except status and queue). */
+       if (mutex_lock_interruptible(&dev->mutex)) {
+               xdev_dbg(dev, "mutex_lock_interruptible got signalled\n");
+               return -ERESTARTSYS;
+       }
+
+       xshmchr_get(dev);
+       dbfs_atomic_inc(&dev->num_open);
+       filp->private_data = dev;
+
+       switch (filp->f_flags & O_ACCMODE) {
+       case O_RDONLY:
+               mode = CHR_READ_FLAG;
+               break;
+       case O_WRONLY:
+               mode = CHR_WRITE_FLAG;
+               break;
+       case O_RDWR:
+               mode = CHR_READ_FLAG | CHR_WRITE_FLAG;
+               break;
+       }
+
+       /* If device is not open, make sure device is in fully closed state. */
+       if (!STATE_IS_OPEN(dev)) {
+               /* Has link close response been received
+                * (if we ever sent it)?
+                */
+               if (STATE_IS_PENDING(dev)) {
+                       /* Still waiting for close response from remote.
+                        * If opened non-blocking, report "would block".
+                        */
+                       if (filp->f_flags & O_NONBLOCK) {
+                               xdev_devl(dev, "%s: exit: "
+                                               "O_NONBLOCK && close pending\n",
+                                               dev->name);
+                               ret = -EAGAIN;
+                               goto open_error;
+                       }
+
+                       xdev_devl(dev, "%s:WAIT for close response "
+                                       "from remote\n", dev->name);
+
+                       /*
+                        * Blocking mode; close is pending and we need to wait
+                        * for its conclusion. However, the modem may be dead,
+                        * or resurrected and alive waiting for
+                        * an open ack.
+                        * It's hard to get this right - if state is
+                        * pending we have missed a state update;
+                        * let's just wait for the ack, and then proceed
+                        * with whatever state we have.
+                        */
+                       result =
+                           wait_event_interruptible_timeout(dev->mgmt_wq,
+                                           !STATE_IS_PENDING(dev) ||
+                                           STATE_IS_REMOTE_TEARDOWN(dev),
+                                           OPEN_TOUT);
+
+                       if (result == -ERESTARTSYS) {
+                               xdev_dbg(dev, "%s:wait_event_interruptible"
+                                       " woken by a signal (1)\n", dev->name);
+                               ret = -ERESTARTSYS;
+                               goto open_error;
+                       }
+
+                       if (result == 0) {
+                               SET_PENDING_OFF(dev);
+                               pr_xchrstate(dev, "Timeout - pending close; "
+                                               "clearing pending");
+                       } else
+                               pr_xchrstate(dev, "wakeup (wait for close)");
+               }
+       }
+
+       /* Device is now either closed, pending open or open */
+       if (STATE_IS_OPEN(dev) && !STATE_IS_PENDING(dev)) {
+               /* Open */
+               xdev_devl(dev, "%s:Device is already opened (dev=%p) check "
+                               "access f_flags = 0x%x file_mode = 0x%x\n",
+                               dev->name, dev, mode, dev->file_mode);
+
+               if (mode & dev->file_mode) {
+                       xdev_devl(dev, "%s:Access mode already in use 0x%x\n",
+                                       dev->name, mode);
+                       ret = -EBUSY;
+                       goto open_error;
+               }
+       } else {
+
+               /* We are closed or pending open.
+                * If closed:       send link setup
+                * If pending open: link setup already sent (we could have been
+                *                  interrupted by a signal last time)
+                */
+               if (!STATE_IS_OPEN(dev)) {
+                       /* First opening of file; do connect */
+
+                       SET_STATE_OPEN(dev);
+                       SET_PENDING_ON(dev);
+                       CLEAR_EOF(dev);
+                       /* Send "open" by resetting indexes */
+                       result = dev->xshm->open(dev->xshm);
+
+                       if (result < 0) {
+                               xdev_dbg(dev, "%s:can't open channel\n",
+                                               dev->name);
+                               ret = -EIO;
+                               SET_STATE_CLOSED(dev);
+                               SET_PENDING_OFF(dev);
+                               goto open_error;
+                       }
+                       dbfs_atomic_inc(&dev->num_init);
+               }
+
+               /* If opened non-blocking, report "success". */
+               if (filp->f_flags & O_NONBLOCK) {
+                       xdev_devl(dev, "%s: EXIT: O_NONBLOCK success\n",
+                                       dev->name);
+                       ret = 0;
+                       goto open_success;
+               }
+
+               xdev_devl(dev, "%s:WAIT for connect response\n", dev->name);
+               /*
+                * misc_open holds a global mutex anyway so there is no
+                * reason to release our own while waiting
+                */
+               result =
+                   wait_event_interruptible_timeout(dev->mgmt_wq,
+                                   !STATE_IS_PENDING(dev) ||
+                                   STATE_IS_REMOTE_TEARDOWN(dev),
+                                   OPEN_TOUT);
+               if (result == 0) {
+                       xdev_dbg(dev, "%s:wait_event_interruptible "
+                                       "timed out (1)\n", dev->name);
+                       ret = -ETIMEDOUT;
+                       goto open_error;
+               }
+               if (result == -ERESTARTSYS) {
+                       xdev_dbg(dev, "%s:wait_event_interruptible"
+                                       " woken by a signal (2)\n", dev->name);
+                       ret = -ERESTARTSYS;
+                       goto open_error;
+               }
+               if (STATE_IS_REMOTE_TEARDOWN(dev)) {
+                       xdev_dbg(dev, "received remote_shutdown indication\n");
+                       ret = -ESHUTDOWN;
+                       goto open_error;
+               }
+
+               pr_xchrstate(dev, "wakeup (wait for open)");
+               if (!STATE_IS_OPEN(dev)) {
+                       /* Lower layers said "no". */
+                       xdev_dbg(dev, "%s:xshmchr_chropen: Closed received\n",
+                                       dev->name);
+                       ret = -EPIPE;
+                       goto open_error;
+               }
+
+               xdev_devl(dev, "%s: connect received\n", dev->name);
+       }
+open_success:
+       /* Open is OK. */
+       dev->file_mode |= mode;
+
+       xdev_devl(dev, "%s: file mode = %x\n",
+                       dev->name, dev->file_mode);
+       pr_xchrstate(dev, "EXIT");
+
+       mutex_unlock(&dev->mutex);
+       xshmchr_put(dev);
+       return 0;
+
+open_error:
+       SET_STATE_CLOSED(dev);
+       SET_PENDING_OFF(dev);
+       mutex_unlock(&dev->mutex);
+       xshmchr_put(dev);
+       return ret;
+}
+
+static int xshmchr_chrrelease(struct inode *inode, struct file *filp)
+{
+       struct xshmchr_char_dev *dev = NULL;
+       int minor = iminor(inode);
+       int mode = 0;
+
+
+       dev = find_device(minor, NULL, 0);
+       if (dev == NULL) {
+               xdev_dbg(dev, "Could not find device\n");
+               return -EBADF;
+       }
+
+       pr_xchrstate(dev, "enter");
+
+       /* I want to be alone on dev (except status and queue). */
+       if (mutex_lock_interruptible(&dev->mutex)) {
+               xdev_dbg(dev, "mutex_lock_interruptible got signalled\n");
+               return -ERESTARTSYS;
+       }
+
+       xshmchr_get(dev);
+       dbfs_atomic_inc(&dev->num_close);
+
+       /* Is the device open? */
+       if (!STATE_IS_OPEN(dev)) {
+               xdev_devl(dev, "Device not open (dev=%p)\n",
+                             dev);
+               mutex_unlock(&dev->mutex);
+               xshmchr_put(dev);
+               return 0;
+       }
+
+       switch (filp->f_flags & O_ACCMODE) {
+       case O_RDONLY:
+               mode = CHR_READ_FLAG;
+               break;
+       case O_WRONLY:
+               mode = CHR_WRITE_FLAG;
+               break;
+       case O_RDWR:
+               mode = CHR_READ_FLAG | CHR_WRITE_FLAG;
+               break;
+       }
+
+       dev->file_mode &= ~mode;
+       if (dev->file_mode) {
+               xdev_devl(dev, "Device is kept open by someone else, "
+                        "don't close. XSHMCHR connection - file_mode = %x\n",
+                        dev->file_mode);
+               mutex_unlock(&dev->mutex);
+               xshmchr_put(dev);
+               return 0;
+       }
+
+       /* IS_CLOSED has a double meaning:
+        * 1) Spontaneous remote shutdown request.
+        * 2) Ack on a channel teardown (disconnect).
+        * Must clear bit, in case we previously received
+        * a remote shutdown request.
+        */
+
+       SET_STATE_CLOSED(dev);
+       SET_PENDING_ON(dev);
+       CLEAR_REMOTE_TEARDOWN(dev);
+       SET_EOF(dev);
+
+       dev->xshm->close(dev->xshm);
+
+       dbfs_atomic_inc(&dev->num_deinit);
+
+       /* Empty the ringbuf */
+       drain_ringbuf(dev);
+       dev->file_mode = 0;
+
+       mutex_unlock(&dev->mutex);
+       pr_xchrstate(dev, "exit");
+       xshmchr_put(dev);
+       return 0;
+}
+
+static const struct file_operations xshmchr_chrfops = {
+       .owner = THIS_MODULE,
+       .read = xshmchr_chrread,
+       .write = xshmchr_chrwrite,
+       .open = xshmchr_chropen,
+       .release = xshmchr_chrrelease,
+       .poll = xshmchr_chrpoll,
+};
+
+static int cfshm_probe(struct platform_device *pdev)
+{
+       struct xshmchr_char_dev *dev = NULL;
+       int result;
+       struct xshm_dev *xshm = pdev->dev.platform_data;
+       xdev_devl(dev, "cfshm_probe called\n");
+
+       if (xshm == NULL)
+               return 0;
+
+       /* Allocate device */
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev) {
+               pr_err("kzalloc failed.\n");
+               return -ENOMEM;
+       }
+
+       kref_init(&dev->kref);
+
+       dev->xshm = xshm;
+       mutex_init(&dev->mutex);
+       init_waitqueue_head(&dev->mgmt_wq);
+
+       /* Fill in some information concerning the misc device. */
+       dev->misc.minor = MISC_DYNAMIC_MINOR;
+       if (strlen(xshm->cfg.name) == 0) {
+               xdev_dbg(dev, "Platform device does not have a name\n");
+               kfree(dev);
+               return -EINVAL;
+       }
+       sprintf(dev->name, "%s", xshm->cfg.name);
+       dev->misc.name = dev->name;
+       dev->misc.fops = &xshmchr_chrfops;
+
+       dev->tx.ri = xshm->cfg.tx.read;
+       dev->tx.wi = xshm->cfg.tx.write;
+       dev->tx.data = xshm->cfg.tx.addr;
+       dev->tx.size = xshm->cfg.tx.ch_size - 1;
+
+       dev->rx.ri = xshm->cfg.rx.read;
+       dev->rx.wi = xshm->cfg.rx.write;
+       dev->rx.data = xshm->cfg.rx.addr;
+       dev->rx.size = xshm->cfg.rx.ch_size - 1;
+       if (dev->rx.size < 2 || dev->tx.size < 2) {
+               xdev_dbg(dev, "dev:%s error - channel size too small\n",
+                               dev->name);
+               kfree(dev);
+               return -EINVAL;
+       }
+       dev->xshm->ipc_rx_cb = ipc_rx_cb;
+       dev->xshm->ipc_tx_release_cb = ipc_tx_release_cb;
+       dev->xshm->open_cb = open_cb;
+       dev->xshm->close_cb = close_cb;
+       dev->xshm->driver_data = dev;
+
+       xdev_devl(dev, "register pdev:%s chr=%s(%s) dev=%p\n", xshm->pdev.name,
+                       dev->name, xshm->cfg.name, dev);
+
+       /* Register the device. */
+       dev->misc.parent = &xshm->pdev.dev;
+       result = misc_register(&dev->misc);
+
+       /* Take our lock to keep anyone from opening the device too early.
+        * The misc device has its own lock, and in open() the locks are
+        * taken in the order misc first, then dev; so we cannot take our
+        * lock until misc_register() has finished. Anyone managing to open
+        * the device between misc_register() and the mutex_lock below gets
+        * a "device not found" error. This window cannot be avoided.
+        */
+       if (mutex_lock_interruptible(&dev->mutex)) {
+               xdev_dbg(dev, "mutex_lock_interruptible got signalled\n");
+               if (result == 0)
+                       misc_deregister(&dev->misc);
+               result = -ERESTARTSYS;
+               goto err_failed;
+       }
+
+       if (result < 0) {
+               pr_warn("XSHMCHR: chnl_chr: error - %d, can't register misc.\n",
+                             result);
+               mutex_unlock(&dev->mutex);
+               goto err_failed;
+       }
+
+       xdev_devl(dev, "XSHMCHR: dev: "
+                       "Registered dev with name=%s minor=%d, dev=%p\n",
+                       dev->misc.name, dev->misc.minor, dev->misc.this_device);
+
+       SET_STATE_CLOSED(dev);
+       SET_PENDING_OFF(dev);
+       CLEAR_REMOTE_TEARDOWN(dev);
+       CLEAR_EOF(dev);
+
+       /* Add the device. */
+       spin_lock(&list_lock);
+       list_add(&dev->list_field, &xshmchr_chrdev_list);
+       spin_unlock(&list_lock);
+
+#ifdef CONFIG_DEBUG_FS
+       if (debugfsdir != NULL) {
+               dev->debugfs_device_dir =
+                   debugfs_create_dir(dev->misc.name, debugfsdir);
+               debugfs_create_u32("conn_state", S_IRUSR | S_IWUSR,
+                                  dev->debugfs_device_dir, &dev->conn_state);
+               debugfs_create_u32("num_open", S_IRUSR | S_IWUSR,
+                                  dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_open);
+               debugfs_create_u32("num_close", S_IRUSR | S_IWUSR,
+                                  dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_close);
+               debugfs_create_u32("num_init", S_IRUSR | S_IWUSR,
+                                  dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_init);
+               debugfs_create_u32("num_init_resp", S_IRUSR | S_IWUSR,
+                                  dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_init_resp);
+               debugfs_create_u32("num_deinit", S_IRUSR | S_IWUSR,
+                                  dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_deinit);
+               debugfs_create_u32("num_deinit_resp", S_IRUSR | S_IWUSR,
+                                  dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_deinit_resp);
+               debugfs_create_u32("num_remote_teardown_ind",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_remote_teardown_ind);
+               debugfs_create_u32("num_read",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_read);
+               debugfs_create_u32("num_read_block",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_read_block);
+               debugfs_create_u32("num_read_bytes",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_read_bytes);
+               debugfs_create_u32("num_write",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_write);
+               debugfs_create_u32("num_write_block",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_write_block);
+               debugfs_create_u32("num_write_bytes",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                                  (u32 *) &dev->num_write_bytes);
+
+               debugfs_create_u32("rx_write_index",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                               (u32 *) dev->xshm->cfg.rx.write);
+               debugfs_create_u32("rx_read_index",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                               (u32 *) dev->xshm->cfg.rx.read);
+               debugfs_create_u32("rx_state",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                               (u32 *) dev->xshm->cfg.rx.state);
+               debugfs_create_u32("tx_write_index",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                               (u32 *) dev->xshm->cfg.tx.write);
+               debugfs_create_u32("tx_read_index",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                               (u32 *) dev->xshm->cfg.tx.read);
+               debugfs_create_u32("tx_state",
+                                  S_IRUSR | S_IWUSR, dev->debugfs_device_dir,
+                               (u32 *) dev->xshm->cfg.tx.state);
+
+       }
+#endif
+       mutex_unlock(&dev->mutex);
+       return 0;
+err_failed:
+       xshmchr_put(dev);
+       return result;
+}
+
+static int chrdev_remove(struct xshmchr_char_dev *dev)
+{
+       if (!dev)
+               return -EBADF;
+
+       if (STATE_IS_OPEN(dev)) {
+               xdev_dbg(dev, "Device is opened "
+                        "(dev=%p) file_mode = 0x%x\n",
+                        dev, dev->file_mode);
+               SET_STATE_CLOSED(dev);
+               SET_PENDING_OFF(dev);
+               wake_up_interruptible_all(&dev->mgmt_wq);
+       }
+
+       if (mutex_lock_interruptible(&dev->mutex)) {
+               xdev_dbg(dev, "mutex_lock_interruptible got signalled\n");
+               xshmchr_put(dev);
+               return -ERESTARTSYS;
+       }
+
+       drain_ringbuf(dev);
+
+       misc_deregister(&dev->misc);
+
+       /* Remove from list, under the list lock (matching the add path). */
+       spin_lock(&list_lock);
+       list_del(&dev->list_field);
+       spin_unlock(&list_lock);
+
+#ifdef CONFIG_DEBUG_FS
+       if (dev->debugfs_device_dir != NULL)
+               debugfs_remove_recursive(dev->debugfs_device_dir);
+#endif
+
+       mutex_unlock(&dev->mutex);
+       xshmchr_put(dev);
+       return 0;
+}
+
+static int cfshm_remove(struct platform_device *pdev)
+{
+       int err;
+       struct xshm_dev *xshm = pdev->dev.platform_data;
+
+       if (xshm == NULL)
+               return 0;
+       pr_devel("unregister pdev:%s chr=%s pdev=%p\n", xshm->pdev.name,
+                       xshm->cfg.name, pdev);
+
+       err = chrdev_remove(xshm->driver_data);
+       if (err)
+               pr_debug("removing char-dev:%s failed.%d\n",
+                                       xshm->cfg.name, err);
+
+       xshm->ipc_rx_cb = NULL;
+       xshm->ipc_tx_release_cb = NULL;
+       xshm->open_cb = NULL;
+       xshm->close_cb = NULL;
+       xshm->driver_data = NULL;
+       return err;
+}
+
+static struct platform_driver cfshm_plat_drv = {
+       .probe = cfshm_probe,
+       .remove = cfshm_remove,
+       .driver = {
+                  .name = "xshms",
+                  .owner = THIS_MODULE,
+                  },
+};
+
+
+static int __init xshmchr_chrinit_module(void)
+{
+       int err;
+       pr_devel("xshm init\n");
+       spin_lock_init(&list_lock);
+
+       /* Register platform driver. */
+       err = platform_driver_register(&cfshm_plat_drv);
+       if (err) {
+               pr_err("Could not register platform SHM driver: %d.\n",
+                       err);
+               goto err_dev_register;
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       debugfsdir = debugfs_create_dir("xshm_chr", NULL);
+#endif
+
+ err_dev_register:
+       return err;
+}
+
+static void __exit xshmchr_chrexit_module(void)
+{
+       int result;
+       struct xshmchr_char_dev *dev = NULL;
+
+       /* Unregister platform driver. */
+       platform_driver_unregister(&cfshm_plat_drv);
+
+       do {
+               /* Remove any device (the first in the list). */
+               dev = find_device(-1, NULL, 0);
+               result = chrdev_remove(dev);
+       } while (result == 0);
+
+#ifdef CONFIG_DEBUG_FS
+       if (debugfsdir != NULL)
+               debugfs_remove_recursive(debugfsdir);
+#endif
+
+}
+
+module_init(xshmchr_chrinit_module);
+module_exit(xshmchr_chrexit_module);
diff --git a/drivers/xshm/xshm_dev.c b/drivers/xshm/xshm_dev.c
new file mode 100644 (file)
index 0000000..1e65049
--- /dev/null
@@ -0,0 +1,468 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s :" fmt, __func__
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/c2c_genio.h>
+#include <linux/xshm/xshm_ipctoc.h>
+#include <linux/xshm/xshm_pdev.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sjur Brændland <sjur.brandeland@stericsson.com>");
+MODULE_DESCRIPTION("External Shared Memory - Supporting direct boot and IPC");
+MODULE_VERSION("XSHM 0.5 : " __DATE__);
+
+static int xshm_inactivity_timeout = 1000;
+module_param(xshm_inactivity_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(xshm_inactivity_timeout, "Inactivity timeout, ms.");
+
+bool ready_for_ipc;
+bool ready_for_caif;
+static spinlock_t list_lock;
+static LIST_HEAD(pdev_list);
+static spinlock_t timer_lock;
+static int inactivity_timeout;
+static struct timer_list inactivity_timer;
+static bool power_on;
+
+#if 1
+#define xdev_dbg(dev, fmt, arg...) printk(KERN_DEBUG "%s%d: %s - " fmt, \
+                       dev->pdev.name, dev->pdev.id, __func__, ##arg)
+#define xdev_devl(dev, fmt, arg...) printk(KERN_DEBUG "%s%d: %s - " fmt, \
+                       dev->pdev.name, dev->pdev.id, __func__, ##arg)
+#define pr_xshmstate(dev, str) \
+       pr_devel("xshm%d: %s: %s STATE: %s txch:%s(%p) rxch:%s(%p)\n",  \
+                       dev->pdev.id, __func__, str,                    \
+                       dev->state == XSHM_DEV_OPEN ? "open" : "close", \
+                       *dev->cfg.tx.state == cpu_to_le32(XSHM_OPEN) ?  \
+                       "open" : "close",                               \
+                       dev->cfg.tx.state,                              \
+                       *dev->cfg.rx.state == cpu_to_le32(XSHM_OPEN) ?  \
+                       "open" : "close",                               \
+                       dev->cfg.rx.state)
+#else
+#define xdev_dbg(...)
+#define xdev_devl(...)
+#undef pr_debug
+#undef pr_devel
+#define pr_debug(...)
+#define pr_devel(...)
+#define pr_xshmstate(...)
+#endif
+
+static void inactivity_tout(unsigned long arg)
+{
+       unsigned long flags;
+       pr_devel("enter\n");
+       spin_lock_irqsave(&timer_lock, flags);
+       /*
+        * This is paranoia, but if timer is reactivated
+        * before this tout function is scheduled,
+        * we just ignore this timeout.
+        */
+       if (timer_pending(&inactivity_timer))
+               goto out;
+
+       if (power_on) {
+               pr_devel("genio power req(off)\n");
+               genio_power_req(false);
+               power_on = false;
+       }
+out:
+       spin_unlock_irqrestore(&timer_lock, flags);
+}
+
+static void activity(void)
+{
+       unsigned long flags;
+       pr_devel("enter\n");
+       spin_lock_irqsave(&timer_lock, flags);
+       if (!power_on) {
+               pr_devel("genio power req(on)\n");
+               genio_power_req(true);
+               power_on = true;
+       }
+       mod_timer(&inactivity_timer,
+                       jiffies + inactivity_timeout);
+       spin_unlock_irqrestore(&timer_lock, flags);
+}
+
+static void reset_activity_tout(void)
+{
+       unsigned long flags;
+       pr_devel("enter\n");
+       spin_lock_irqsave(&timer_lock, flags);
+       if (power_on) {
+               genio_power_req(false);
+               power_on = false;
+       }
+       del_timer_sync(&inactivity_timer);
+       spin_unlock_irqrestore(&timer_lock, flags);
+}
+
+static int xshmdev_ipc_tx(struct xshm_dev *dev)
+{
+       xdev_devl(dev, "call genio_set_bit(%d)\n", dev->cfg.tx.xfer_bit);
+       activity();
+       return genio_set_bit(dev->cfg.tx.xfer_bit);
+}
+
+static int xshmdev_ipc_rx_release(struct xshm_dev *dev, bool more)
+{
+       xdev_devl(dev, "call genio_set_bit(%d)\n", dev->cfg.tx.xfer_bit);
+       activity();
+       return genio_set_bit(dev->cfg.rx.xfer_done_bit);
+}
+
+static int do_open(struct xshm_dev *dev)
+{
+       int err;
+
+       pr_xshmstate(dev, "enter");
+       err = dev->open_cb(dev->driver_data);
+       if (err < 0) {
+               xdev_dbg(dev, "Error - open_cb failed\n");
+
+               /* Make sure ring-buffer is empty in RX and TX direction */
+               *dev->cfg.rx.read = *dev->cfg.rx.write;
+               *dev->cfg.tx.write = *dev->cfg.tx.read;
+               *dev->cfg.tx.state = cpu_to_le32(XSHM_CLOSED);
+               xdev_devl(dev, "set state = XSHM_DEV_CLOSED\n");
+               dev->state = XSHM_DEV_CLOSED;
+               return err;
+       }
+
+       /* Check if we already have any data in the pipe */
+       if (*dev->cfg.rx.write != *dev->cfg.rx.read) {
+               pr_devel("Received data during opening\n");
+               dev->ipc_rx_cb(dev->driver_data);
+       }
+
+       return err;
+}
+
+static void genio_rx_cb(void *data)
+{
+       struct xshm_dev *dev = data;
+
+       pr_xshmstate(dev, "Enter");
+
+       if (likely(dev->state == XSHM_DEV_OPEN)) {
+               if (unlikely(!ready_for_ipc)) {
+                       xdev_devl(dev, "ready_for_ipc is not yet set\n");
+                       return;
+               }
+
+               if (dev->ipc_rx_cb) {
+                       int err = dev->ipc_rx_cb(dev->driver_data);
+                       if (unlikely(err < 0))
+                               goto remote_close;
+               }
+
+       } else if (*dev->cfg.rx.state == cpu_to_le32(XSHM_OPEN)) {
+               pr_xshmstate(dev, "");
+               dev->state = XSHM_DEV_OPEN;
+               if (!ready_for_ipc) {
+                       xdev_devl(dev, "ready_for_ipc is not yet set\n");
+                       return;
+               }
+               if (do_open(dev) < 0)
+                       goto open_fail;
+       }
+       return;
+open_fail:
+       pr_xshmstate(dev, "exit open failed");
+       /* Make sure ring-buffer is empty in RX and TX direction */
+       *dev->cfg.rx.read = *dev->cfg.rx.write;
+       *dev->cfg.tx.write = *dev->cfg.tx.read;
+remote_close:
+       *dev->cfg.tx.state = cpu_to_le32(XSHM_CLOSED);
+       dev->state = XSHM_DEV_CLOSED;
+       dev->close_cb(dev->driver_data);
+}
+
+static void genio_tx_release_cb(void *data)
+{
+       struct xshm_dev *dev = data;
+
+       pr_xshmstate(dev, "Enter");
+       if (!ready_for_ipc) {
+               xdev_devl(dev, "not ready_for_ipc\n");
+               return;
+       }
+       if (dev->ipc_tx_release_cb)
+               dev->ipc_tx_release_cb(dev->driver_data);
+}
+
+static int xshmdev_open(struct xshm_dev *dev)
+{
+       int err = -EINVAL;
+       struct list_head *node;
+       struct list_head *n;
+
+       pr_xshmstate(dev, "Enter");
+       if (WARN_ON(dev->ipc_rx_cb == NULL) ||
+                       WARN_ON(dev->ipc_tx_release_cb == NULL) ||
+                       WARN_ON(dev->open_cb == NULL) ||
+                       WARN_ON(dev->close_cb == NULL))
+               goto err;
+
+       list_for_each_safe(node, n, &pdev_list) {
+               struct xshm_dev *dev2;
+               dev2 = list_entry(node, struct xshm_dev, node);
+               if (dev2 == dev)
+                       continue;
+
+               if (dev2->state == XSHM_DEV_OPEN &&
+                               dev2->cfg.excl_group != dev->cfg.excl_group) {
+                       xdev_dbg(dev, "Exclusive group "
+                                       "prohibits device open\n");
+                       err = -EPERM;
+                       goto err;
+               }
+       }
+       pr_devel("call genio_subscribe(%d)\n", dev->cfg.rx.xfer_bit);
+       err = genio_subscribe(dev->cfg.rx.xfer_bit, genio_rx_cb, dev);
+       if (err)
+               goto err;
+
+       pr_devel("call genio_subscribe(%d)\n", dev->cfg.tx.xfer_done_bit);
+       err = genio_subscribe(dev->cfg.tx.xfer_done_bit,
+                       genio_tx_release_cb, dev);
+       if (err)
+               goto err;
+
+       /* Indicate that our side is open and ready for action */
+       *dev->cfg.rx.read = *dev->cfg.rx.write;
+       *dev->cfg.tx.write = *dev->cfg.tx.read;
+       *dev->cfg.tx.state = cpu_to_le32(XSHM_OPEN);
+
+       if (ready_for_ipc)
+               err = xshmdev_ipc_tx(dev);
+
+       if (err < 0) {
+               xdev_dbg(dev, "can't update geno\n");
+               goto err;
+       }
+       /* If other side is ready as well we're ready to roll */
+       if (*dev->cfg.rx.state == cpu_to_le32(XSHM_OPEN) && ready_for_ipc) {
+               if (do_open(dev) < 0)
+                       goto err;
+               dev->state = XSHM_DEV_OPEN;
+       }
+
+       return 0;
+err:
+       pr_xshmstate(dev, "exit error");
+       *dev->cfg.rx.read = *dev->cfg.rx.write;
+       *dev->cfg.tx.write = *dev->cfg.tx.read;
+       *dev->cfg.tx.state = cpu_to_le32(XSHM_CLOSED);
+       return err;
+}
+
+static void xshmdev_close(struct xshm_dev *dev)
+{
+       pr_xshmstate(dev, "enter");
+
+       dev->state = XSHM_DEV_CLOSED;
+       *dev->cfg.rx.read = *dev->cfg.rx.write;
+       *dev->cfg.tx.state = cpu_to_le32(XSHM_CLOSED);
+       xshmdev_ipc_tx(dev);
+       if (dev->close_cb)
+               dev->close_cb(dev->driver_data);
+
+       pr_devel("call genio_unsubscribe(%d)\n", dev->cfg.rx.xfer_bit);
+       genio_unsubscribe(dev->cfg.rx.xfer_bit);
+       pr_devel("call genio_unsubscribe(%d)\n", dev->cfg.tx.xfer_done_bit);
+       genio_unsubscribe(dev->cfg.tx.xfer_done_bit);
+}
+
+int xshm_register_dev(struct xshm_dev *dev)
+{
+       int err;
+       unsigned long flags;
+
+       dev->state = XSHM_DEV_CLOSED;
+       dev->open = xshmdev_open;
+       dev->close = xshmdev_close;
+       dev->ipc_rx_release = xshmdev_ipc_rx_release;
+       dev->ipc_tx = xshmdev_ipc_tx;
+       /* Driver should only use this when platform_data is set */
+       dev->pdev.dev.platform_data = dev;
+       xdev_devl(dev, "re-register SHM platform device %s\n", dev->pdev.name);
+       err = platform_device_register(&dev->pdev);
+       if (err) {
+               xdev_dbg(dev, "registration failed (%d)\n", err);
+               goto clean;
+       }
+       spin_lock_irqsave(&list_lock, flags);
+       list_add_tail(&dev->node, &pdev_list);
+       spin_unlock_irqrestore(&list_lock, flags);
+
+       return err;
+clean:
+       kfree(dev);
+       return err;
+}
+
+static void genio_caif_ready_cb(bool ready)
+{
+       pr_devel("enter\n");
+       /* Update the global variable ready_for_caif */
+       if (ready_for_caif != ready) {
+               ready_for_caif = ready;
+               xshm_caif_ready();
+       }
+}
+
+static void genio_errhandler(int errno)
+{
+       /* Fake CAIF_READY low to trigger modem restart */
+       pr_warn("Driver reported error:%d\n", errno);
+       ready_for_caif = 0;
+       xshm_caif_ready();
+}
+
+void genio_ipc_ready_cb(void)
+{
+       struct xshm_dev *dev, *tmp;
+       unsigned long flags;
+       int err;
+       u32 getter = 0;
+       u32 setter = 0;
+
+       pr_devel("enter\n");
+       /* Set global variable ready_for_ipc */
+#ifdef DEBUG
+       /*
+        * In real life ready_for_ipc doesn't change, but toggling it
+        * is convenient for testing.
+        */
+       ready_for_ipc = !ready_for_ipc;
+#else
+       ready_for_ipc = true;
+#endif
+
+       xshm_ipc_ready();
+
+       genio_register_errhandler(genio_errhandler);
+
+       pr_devel("call genio_subscribe_caif_ready()\n");
+       err = genio_subscribe_caif_ready(genio_caif_ready_cb);
+       if (err < 0)
+               pr_debug("genio_subscribe_caif_ready failed:%d\n", err);
+
+       /* Take a refcount to the device so it doesn't go away */
+       spin_lock_irqsave(&list_lock, flags);
+       list_for_each_entry_safe(dev, tmp, &pdev_list, node)
+               get_device(&dev->pdev.dev);
+       spin_unlock_irqrestore(&list_lock, flags);
+
+       /* Collect the bit-mask for GENIO bits */
+       list_for_each_entry_safe(dev, tmp, &pdev_list, node) {
+               setter |= 1 << dev->cfg.tx.xfer_bit;
+               setter |= 1 << dev->cfg.rx.xfer_done_bit;
+               getter |= 1 << dev->cfg.rx.xfer_bit;
+               getter |= 1 << dev->cfg.tx.xfer_done_bit;
+       }
+       pr_devel("call genio_bit_alloc(%x,%x)\n", setter, getter);
+       err = genio_bit_alloc(setter, getter);
+       if (err < 0)
+               pr_debug("genio_bit_alloc failed:%d\n", err);
+
+       list_for_each_entry_safe(dev, tmp, &pdev_list, node) {
+               if (dev->cfg.rx.state != NULL && dev->cfg.tx.state != NULL &&
+                               *dev->cfg.rx.state == cpu_to_le32(XSHM_OPEN) &&
+                               *dev->cfg.tx.state == cpu_to_le32(XSHM_OPEN)) {
+                       dev->state = XSHM_DEV_OPEN;
+                       do_open(dev);
+               }
+               put_device(&dev->pdev.dev);
+       }
+}
+
+static int __init xshm_init(void)
+{
+       int err;
+
+       pr_devel("Initializing\n");
+
+       /* Pre-calculate inactivity timeout. */
+       if (xshm_inactivity_timeout != -1) {
+               inactivity_timeout =
+                               xshm_inactivity_timeout * HZ / 1000;
+               if (inactivity_timeout == 0)
+                       inactivity_timeout = 1;
+               else if (inactivity_timeout > NEXT_TIMER_MAX_DELTA)
+                       inactivity_timeout = NEXT_TIMER_MAX_DELTA;
+       } else {
+               inactivity_timeout = NEXT_TIMER_MAX_DELTA;
+       }
+
+       spin_lock_init(&list_lock);
+       INIT_LIST_HEAD(&pdev_list);
+
+       spin_lock_init(&timer_lock);
+       init_timer(&inactivity_timer);
+       inactivity_timer.data = 0L;
+       inactivity_timer.function = inactivity_tout;
+
+       pr_devel("call genio_init()\n");
+
+       err = xshm_boot_init();
+       if (err)
+               goto err;
+
+       return err;
+err:
+       pr_devel("call genio_exit()\n");
+       return err;
+}
+
+void close_devices(void)
+{
+       struct xshm_dev *dev, *tmp;
+
+       list_for_each_entry_safe(dev, tmp, &pdev_list, node)
+               if (dev->close_cb)
+                       dev->close_cb(dev->driver_data);
+}
+
+void xshm_reset(void)
+{
+       struct xshm_dev *dev, *tmp;
+       unsigned long flags;
+
+       list_for_each_entry_safe(dev, tmp, &pdev_list, node) {
+               get_device(&dev->pdev.dev);
+               if (dev->close_cb)
+                       dev->close_cb(dev->driver_data);
+               platform_device_unregister(&dev->pdev);
+               spin_lock_irqsave(&list_lock, flags);
+               dev->pdev.dev.platform_data = NULL;
+               list_del(&dev->node);
+               spin_unlock_irqrestore(&list_lock, flags);
+               put_device(&dev->pdev.dev);
+       }
+
+       reset_activity_tout();
+       genio_reset();
+}
+
+static void __exit xshm_exit(void)
+{
+       xshm_reset();
+       genio_unsubscribe(READY_FOR_IPC_BIT);
+       genio_unsubscribe(READY_FOR_CAIF_BIT);
+       xshm_boot_exit();
+}
+
+module_init(xshm_init);
+module_exit(xshm_exit);
index f6ae2faa67b739fb23dca9b564397981f3847ea6..504e113ad83a97bb01c9d15f6d6ce3988907bd83 100644 (file)
@@ -18,6 +18,7 @@ header-y += netfilter_bridge/
 header-y += netfilter_ipv4/
 header-y += netfilter_ipv6/
 header-y += usb/
+header-y += xshm/
 header-y += wimax/
 
 objhdr-y += version.h
diff --git a/include/linux/c2c_genio.h b/include/linux/c2c_genio.h
new file mode 100644 (file)
index 0000000..e7a846e
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2011
+ * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __INC_GENIO_H
+#define __INC_GENIO_H
+#include <linux/types.h>
+
+/**
+ * DOC: C2C GENI/GENO interface.
+ *
+ * This defines the API between the C2C driver and the GENI/GENO registers.
+ */
+
+/**
+ * enum GENIO_BITS - Definition of special GENIO BITS.
+ * @READY_FOR_IPC_BIT:  Remote side is ready for IPC.
+ * This GENI/GENO bit is triggered when ring-buffer protocol
+ * is enabled from the remote end (modem)
+ *
+ * @READY_FOR_CAIF_BIT: Remote side is ready for CAIF.
+ * This GENI/GENO bit is triggered when CAIF protocol
+ * is enabled from the remote end (modem)
+ */
+enum GENIO_BITS {
+       READY_FOR_CAIF_BIT = 28,
+       READY_FOR_IPC_BIT = 29
+};
+
+/**
+ * genio_subscribe - Subscribe for notifications on bit-change of GENI/O bits.
+ *
+ * @bit:       The GENI/O bit where we want to be called back when it changes.
+ *
+ * @bit_set_cb:        Callback function to be called when the requested GENI/O bit
+ *             is set by the external device (modem).
+ *
+ * @data: Client data to be provided in the callback function.
+ *
+ * Install a callback function for a GENI/O bit. Returns negative upon error.
+ *
+ * The genio driver is expected to handle the 4-state handshake for geni/geno
+ * update, for the bit this function subscribes to. This function may block,
+ * and cannot be called from IRQ context.
+ *
+ * Returns zero on success, and negative upon error.
+ *
+ * Precondition: This function is called after genio_set_shm_addr() and
+ * genio_bit_alloc(). @bit must be defined as a getter in genio_bit_alloc().
+ *
+ * Callback context:
+ *             The @bit_set_cb callback is called from IRQ context.
+ *             The callback function is not allowed to block or spend
+ *             much CPU time in the callback; it must defer work to
+ *             soft-IRQ or work queues.
+ */
+int genio_subscribe(int bit, void (*bit_set_cb)(void *data), void *data);
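+
+/*
+ * Illustrative sketch (not part of this patch): a minimal subscriber,
+ * assuming a hypothetical bit number MY_RX_BIT and client struct. The
+ * callback runs in IRQ context, so real work is deferred to a work queue:
+ *
+ *     static void my_bit_set_cb(void *data)
+ *     {
+ *             struct my_client *cl = data;
+ *             schedule_work(&cl->rx_work);
+ *     }
+ *
+ *     err = genio_subscribe(MY_RX_BIT, my_bit_set_cb, cl);
+ *     if (err)
+ *             pr_err("genio_subscribe failed: %d\n", err);
+ */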
+
+/**
+ * genio_unsubscribe - Unsubscribe from callbacks on a GENI/O bit.
+ * @bit:       The GENI/O bit we want to release.
+ *
+ * This function may block. It returns zero on success, and negative upon error.
+ *
+ * Precondition: @bit must be defined as a getter in genio_bit_alloc().
+ */
+int genio_unsubscribe(int bit);
+
+/**
+ * genio_bit_alloc - Allocate the usage of GENI/O bits.
+ *
+ * @setter_mask:       Bit-mask defining the bits that can be set by
+ *                     genio_set_bit()
+ * @getter_mask:       Bit-mask defining the bits that can be subscribed by
+ *                     genio_subscribe().
+ *
+ * The @getter_mask defines the bits for the RX direction, i.e. bits that can
+ * be subscribed to with the function genio_subscribe().
+ * The @setter_mask defines the bits for the TX direction, i.e. bits that can
+ * be set by the function genio_set_bit().
+ * This function may block.
+ *
+ * Returns zero on success, and negative upon error.
+ *
+ * Precondition:
+ * This function cannot be called before ipc_ready_cb() has been called,
+ * and must be called prior to any call to genio_subscribe() or genio_set_bit().
+ *
+ */
+int genio_bit_alloc(u32 setter_mask, u32 getter_mask);
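+
+/*
+ * Illustrative sketch: building the masks from a channel configuration,
+ * as the xshm core does in genio_ipc_ready_cb(); the cfg variable mirrors
+ * struct xshm_channel and is an assumption here:
+ *
+ *     u32 setter = 0, getter = 0;
+ *
+ *     setter |= 1 << cfg.tx.xfer_bit;
+ *     setter |= 1 << cfg.rx.xfer_done_bit;
+ *     getter |= 1 << cfg.rx.xfer_bit;
+ *     getter |= 1 << cfg.tx.xfer_done_bit;
+ *     err = genio_bit_alloc(setter, getter);
+ */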
+
+/**
+ * genio_set_shm_addr - Inform remote device about the shared memory address.
+ * @addr:              The shared memory address.
+ * @ipc_ready_cb:      The callback function indicating IPC is ready.
+ *
+ * Description:
+ * Implements the setting of shared memory address involving the
+ * READY_FOR_IPC GENIO bits, and READY_FOR_IPC handshaking.
+ * When handshaking is done ipc_ready_cb is called.
+ * The usage of this bit is during start/restart.
+ * This function may block.
+ *
+ * Once ipc_ready_cb() has been called, stream channels can be opened.
+ *
+ * Sequence:
+ * (1) Write the address to the GENO register,
+ *
+ * (2) wait for interrupt on IPC_READY (set to one in GENI register)
+ *
+ * (3) write zero to the GENO register
+ *
+ * (4) wait for interrupt on IPC_READY (set to zero)
+ *
+ * (5) call the callback function for IPC_READY
+ *
+ * Returns zero on success, and negative upon error.
+ *
+ * Precondition:
+ * This function must be called initially upon start, or after remote device
+ * has been reset.
+ */
+int genio_set_shm_addr(u32 addr, void (*ipc_ready_cb) (void));
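+
+/*
+ * Illustrative sketch: publishing the TOC address during start-up. Using
+ * genio_ipc_ready_cb() as the ready hook follows the xshm code; the
+ * toc_addr value is an assumption:
+ *
+ *     err = genio_set_shm_addr(toc_addr, genio_ipc_ready_cb);
+ */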
+
+/**
+ * genio_subscribe_caif_ready - Subscribe to notification that CAIF channels
+ * can be opened.
+ *
+ * @caif_ready_cb: is called with @ready = %true in the start up sequence,
+ * when the remote side is ready for CAIF enumeration. Upon reset,
+ * caif_ready_cb() will be called with @ready = %false.
+ *
+ * The %READY_FOR_CAIF_BIT is set to one as long as the
+ * modem is able to run CAIF traffic. Upon modem restart/crash it will
+ * be set back to zero.
+ * This function may block.
+ *
+ * Returns zero on success, and negative upon error.
+ */
+int genio_subscribe_caif_ready(void (*caif_ready_cb) (bool ready));
+
+/**
+ * genio_register_errhandler - Register an error handler.
+ *
+ * @errhandler: error handler called from driver upon severe errors
+ *             that requires reset of the remote device.
+ */
+void genio_register_errhandler(void (*errhandler)(int errno));
+
+/**
+ * genio_reset() - Reset the C2C driver
+ *
+ * Reset the C2C Driver due to remote device restart.
+ * This shall reset state back to initial state, and should only
+ * be used when remote device (modem) has reset.
+ *
+ * All settings, subscriptions and state information in the driver must
+ * be reset. GENIO client must do all subscriptions again.
+ * This function may block.
+ *
+ * Returns zero on success, and negative upon error.
+ */
+int genio_reset(void);
+
+/**
+ * genio_set_bit() -   Set a single GENI/O bit.
+ *
+ * @bit:       The GENI/O bit to set
+ *
+ * This function is used to signal over GENI/GENO; the driver must
+ * perform the 4-state protocol to signal the change to the remote device.
+ * This function is non-blocking, and can be called from Soft-IRQ context.
+ *
+ * Returns zero on success, and negative upon error.
+ *
+ * Precondition: @bit must be defined as a setter in genio_bit_alloc().
+ */
+int genio_set_bit(int bit);
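+
+/*
+ * Illustrative sketch, mirroring the xshm TX path: after moving the write
+ * index, signal the remote side (dev is a struct xshm_dev *, new_wi is a
+ * hypothetical new write index):
+ *
+ *     *dev->cfg.tx.write = cpu_to_le32(new_wi);
+ *     err = genio_set_bit(dev->cfg.tx.xfer_bit);
+ */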
+
+/**
+ * genio_power_req() - Request power on the remote C2C block.
+ *
+ * @state:     1 - power-on request , 0 - power-off request
+ *
+ * This function will request power-on of the remote C2C block.
+ * This function is non-blocking, and can be called from Soft-IRQ context.
+ *
+ * Returns zero on success, and negative upon error.
+ */
+int genio_power_req(int state);
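+
+/*
+ * Illustrative sketch, mirroring the xshm driver's activity() helper:
+ * request power before touching shared memory and drop the request from
+ * an inactivity timer; power_on and inactivity_timer are driver state:
+ *
+ *     if (!power_on) {
+ *             genio_power_req(true);
+ *             power_on = true;
+ *     }
+ *     mod_timer(&inactivity_timer, jiffies + inactivity_timeout);
+ */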
+
+#endif /* __INC_GENIO_H */
diff --git a/include/linux/xshm/Kbuild b/include/linux/xshm/Kbuild
new file mode 100644 (file)
index 0000000..4315a21
--- /dev/null
@@ -0,0 +1 @@
+header-y += xshm_netlink.h
diff --git a/include/linux/xshm/xshm_ipctoc.h b/include/linux/xshm/xshm_ipctoc.h
new file mode 100644 (file)
index 0000000..73ea25b
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef XSHM_TOC
+#define XSHM_TOC
+
+/**
+ * DOC: XSHM Shared Memory Layout
+ *
+ * XSHM defines a set of structures describing the memory layout used
+ * for the Shared Memory IPC. In short &toc_entry points out &ipc_toc,
+ * which points out the &xshm_ipctoc_channel. &xshm_ipctoc_channel defines
+ * the channels used to communicate between host and external device (modem).
+ *
+ * &xshm_ipctoc_channel can be used in packet-mode or stream-mode,
+ * and points out &xshm_bufidx, holding information about circular
+ * buffers and their read/write indices etc.
+ */
+
+#pragma pack(1)
+struct _xshm_offsets {
+       __le32 rx;
+       __le32 tx;
+};
+
+/**
+ * struct xshm_ipctoc - Table Of Content definition for IPC.
+ *
+ * @magic:     Magic shall always be set to the ASCII coded string "TC" (2 bytes).
+ * @version:   Main version of the TOC header.
+ * @subver:    Sub version of the TOC header.
+ * @channel_offsets: Offsets for both the rx and tx directions must be set.
+ *                     The array must be terminated by a zero value.
+ *
+ * This struct is stored at the start of the External Shared memory, and
+ * serves as an extended table of contents defining the channel configurations
+ * for the external shared memory protocol between a modem and host.
+ *
+ * This extended table of contents (ipctoc) is written to a predefined memory
+ * location; the modem reads it during start-up and uses it for setting up
+ * the IPC channels and their buffers.
+ *
+ */
+
+struct xshm_ipctoc {
+       __u8 magic[2];
+       __u8 version;
+       __u8 subver;
+       struct _xshm_offsets channel_offsets[8];
+};
+#define XSHM_PACKET_MODE 0x1
+#define XSHM_STREAM_MODE 0x2
+#define XSHM_LOOP_MODE  0x4
+#define XSHM_PAIR_MODE  0x8
+#define XSHM_MODE_MASK  0x3
+
+#define XSHM_IPCTOC_MAGIC1 'T'
+#define XSHM_IPCTOC_MAGIC2 'C'
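+
+/*
+ * Illustrative sketch: stamping a TOC header before handing the memory
+ * area over to the modem; the ipctoc pointer and the version values 1/0
+ * are assumptions, not mandated by this header:
+ *
+ *     struct xshm_ipctoc *ipctoc = shm_area;
+ *
+ *     ipctoc->magic[0] = XSHM_IPCTOC_MAGIC1;
+ *     ipctoc->magic[1] = XSHM_IPCTOC_MAGIC2;
+ *     ipctoc->version = 1;
+ *     ipctoc->subver = 0;
+ */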
+
+/**
+ * struct xshm_ipctoc_channel - Channel descriptor for External Shared memory.
+ *
+ * @offset: Relative address to channel data area.
+ * @size: Total size of a SHM channel area partition.
+ * @mtu: Maximum Transfer Unit for packets in a buffer (packet mode).
+ * @packets: Maximum Number of packets in a buffer (packet mode).
+ * @mode: Mode of channel: packet mode = 1, stream mode = 2.
+ * @buffers: Number of buffers for the channel.
+ * @ipc: Offset to IPC message location (of type struct xshm_bufidx).
+ * @read_bit: GENI/O bit used to indicate update of the read pointer for
+ *     this channel (at offset ipc).
+ * @write_bit: GENI/O bit used to indicate update of the write pointer for
+ *     this channel (at offset ipc).
+ * @alignment: Protocol specific options for the protocol,
+ *     e.g. packet alignment.
+ *
+ * This struct defines the channel configuration for a single direction.
+ *
+ * This structure is pointed out by the &xshm_toc and is written by
+ * host during start-up and read by modem at firmware boot.
+ *
+ */
+
+struct xshm_ipctoc_channel {
+       __le32 offset;
+       __le32 size;
+/* private: */
+       __u8 unused[3];
+/* public: */
+       __u8 mode;
+       __le32 buffers;
+       __le32 ipc;
+       __le16 write_bit;
+       __le16 read_bit;
+       __u16 mtu;
+       __u8 packets;
+       __u8 alignment;
+};
+
+/**
+ * struct xshm_bufidx - Indices for a uni-directional xshm channel.
+ *
+ * @read_index: Specify the read index for a channel. This field can
+ *     have value in range of [0.. xshm_ipctoc_channel.buffers -1].
+ *     In stream mode - this is the read index in the ringbuffer.
+ *     In packet mode - this index will at any time refer to the next
+ *     buffer available for read.
+ *
+ * @write_index: Specify the write index for a channel.
+ *     This field can have value in range of [0.. buffers -1].
+ *     In stream mode - this is the write index in the ringbuffer.
+ *     In packet mode - this index will at any time refer to the next
+ *     buffer available for write.
+ *
+ * @size: The actual number of bytes for a buffer at each index.
+ *       This array has xshm_ipctoc_channel.buffers slots, one for each buffer.
+ *       The size is updated every time data is written to the buffer.
+ *
+ * @state: The state of the channel, 0 - Closed, 1 - Open
+ *
+ *
+ * This structure contains data for the ring-buffer used in packet and stream
+ * mode, for the external shared memory protocol.
+ * Note that the read_index and the write_index
+ * refer to two different channels. So for a ring buffer used to communicate
+ * from modem, the modem will update the write_index while the Linux host
+ * will update the read_index.
+ */
+struct xshm_bufidx {
+       __le32 state;
+       __le32 read_index;
+       __le32 write_index;
+       __le32 size[0];
+};
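+
+/*
+ * Illustrative sketch (packet mode): consuming the next buffer and
+ * advancing the read index; buffers, buf_addr() and process() are
+ * hypothetical helpers:
+ *
+ *     u32 ri = le32_to_cpu(bufidx->read_index);
+ *     u32 len = le32_to_cpu(bufidx->size[ri]);
+ *
+ *     process(buf_addr(ri), len);
+ *     bufidx->read_index = cpu_to_le32((ri + 1) % buffers);
+ */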
+
+/**
+ * struct toc_entry - Points out the boot images.
+ *
+ * @start: Offset counting from start of memory area to the image data.
+ * @size:  Size of the images in bytes.
+ * @flags: Use 0 if no flags are in use.
+ * @entry_point: Where to jump to start executing. Only applicable
+ *             when using SDRAM. Set to 0xffffffff if unused.
+ * @load_addr: Location in SDRAM to move image. Set to 0xffffffff if
+ *             not applicable.
+ * @name: Name of image.
+ */
+struct toc_entry {
+       __le32 start;
+       __le32 size;
+       __le32 flags;
+       __le32 entry_point;
+       __le32 load_addr;
+       char name[12];
+};
+#pragma pack()
+
+#endif
diff --git a/include/linux/xshm/xshm_netlink.h b/include/linux/xshm/xshm_netlink.h
new file mode 100644 (file)
index 0000000..80b01de
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2011
+ * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef XSHM_NL_H_
+#define XSHM_NL_H_
+
+#define XSHM_PROTO_VERSION 1
+#define XSHM_PROTO_SUB_VERSION 0
+#define XSHM_NETLINK_VERSION 1
+/**
+ * enum XSHM_COMMANDS - Attributes used for configuring XSHM device.
+ *
+ * @XSHM_C_ADD_STREAM_CHANNEL: Adds a Stream Channel.
+ * This will cause an instance of XSHM-CHR to be created.
+ * @XSHM_C_ADD_PACKET_CHANNEL: Adds a Packet Channel.
+ * This will cause an instance of CAIF-SHM to be created.
+ * @XSHM_C_COMMIT: Formats and writes channel configuration data to
+ *     Shared Memory.
+ * @XSHM_C_SET_ADDR: Writes the TOC address to GENO register.
+ * @XSHM_C_REGISTER:  Initiates registration of the channel devices.
+ *     This will cause xshm - character devices or
+ *     CAIF network instances to be created.
+ * @XSHM_C_RESET:  Resets the configuration data and removes the
+ *     platform devices and their associated channel configuration.
+ *     ipc_ready and caif_ready are set to false.
+ *
+ * A normal sequence of events is: [XSHM_C_RESET], [XSHM_C_ADD_X_CHANNEL],
+ *     XSHM_C_COMMIT, XSHM_C_REGISTER, XSHM_C_SET_ADDR.
+ */
+enum XSHM_COMMANDS {
+       XSHM_C_ADD_STREAM_CHANNEL = 1,
+       XSHM_C_ADD_PACKET_CHANNEL,
+       XSHM_C_RESET,
+       XSHM_C_SET_ADDR,
+       XSHM_C_COMMIT,
+       XSHM_C_REGISTER,
+       __XSHM_C_VERIFY,
+       __XSHM_C_MAX
+};
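+
+/*
+ * Illustrative userspace sketch using libnl-genl (an assumption; neither
+ * the library nor the generic netlink family name "xshm" is defined by
+ * this header):
+ *
+ *     struct nl_sock *sk = nl_socket_alloc();
+ *     genl_connect(sk);
+ *     int fam = genl_ctrl_resolve(sk, "xshm");
+ *     struct nl_msg *msg = nlmsg_alloc();
+ *
+ *     genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
+ *                 XSHM_C_COMMIT, XSHM_NETLINK_VERSION);
+ *     nl_send_auto(sk, msg);
+ */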
+
+/**
+ * enum XSHM_ATTRIBUTES - Attributes used for configuring XSHM device.
+ * @XSHM_A_VERSION: Version of XSHM netlink protocol. Type NLA_U8
+ * @XSHM_A_SUB_VERSION: Sub-version of XSHM netlink protocol. Type NLA_U8
+ * @XSHM_A_NAME: Name of the channel, max 15 characters. Type NLA_NUL_STRING
+ * @XSHM_A_EXCL_GROUP: Devices may be part of a group. Devices from the
+ *     same group are allowed to be open simultaneously,
+ *     but devices from different groups cannot be opened
+ *     at the same time. Type NLA_U8.
+ * @XSHM_A_RX_CHANNEL: The RX direction attributes. Type NLA_NESTED.
+ *     Each channel may contain the attributes - XSHM_A_CHANNEL_SIZE,
+ *     XSHM_A_CHANNEL_BUFFERS, XSHM_A_ALIGNMENT, XSHM_A_MTU.
+ *
+ * @XSHM_A_TX_CHANNEL: The TX direction attributes. Type NLA_NESTED.
+ *
+ * @XSHM_A_CHANNEL_SIZE: Size of the data area for a channel. Specified
+ *     for RX, TX. Type NLA_U32,
+ * @XSHM_A_CHANNEL_BUFFERS: Number of buffers for a packet channel.
+ *     This attribute is only used for packet channels.  Specified for RX, TX.
+ *     Type NLA_U32,
+ * @XSHM_A_ALIGNMENT: Alignment for each packet in a buffer. This attribute
+ *      is only used for packet channels. Specified for RX, TX. Type NLA_U8,
+ * @XSHM_A_MTU: Maximum Transfer Unit for packets in a buffer.
+ *     This is only applicable for packet channels.
+ *     Specified for RX, TX. Type NLA_U16,
+ * @XSHM_A_PACKETS: Maximum number of packets in a buffer. Type NLA_U8
+ * @XSHM_A_PRIORITY: Priority of the channel, legal range is 0-7 where
+ *     0 is lowest priority. Type NLA_U8.
+ * @XSHM_A_LATENCY: Latency for the channel: value 0 means low latency
+ *      and low bandwidth; value 1 means high latency and high bandwidth.
+ *      Type NLA_U8.
+ */
+enum XSHM_ATTRIBUTES {
+       __XSHM_A_FLAGS = 1,             /* Test flags: NLA_U32 */
+       XSHM_A_VERSION,
+       XSHM_A_SUB_VERSION,
+       XSHM_A_NAME,
+       XSHM_A_EXCL_GROUP,
+       XSHM_A_RX_CHANNEL,
+       XSHM_A_TX_CHANNEL,
+       XSHM_A_CHANNEL_SIZE,
+       XSHM_A_CHANNEL_BUFFERS,
+       XSHM_A_ALIGNMENT,
+       XSHM_A_MTU,
+       XSHM_A_PACKETS,
+       XSHM_A_PRIORITY,
+       XSHM_A_LATENCY,
+       __XSHM_A_MAX,
+};
+#define XSHM_A_MAX (__XSHM_A_MAX - 1)
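+
+/*
+ * Illustrative sketch: nesting the RX-channel attributes with the
+ * standard netlink attribute helpers; msg and the example values are
+ * assumptions:
+ *
+ *     struct nlattr *rx = nla_nest_start(msg, XSHM_A_RX_CHANNEL);
+ *
+ *     nla_put_u32(msg, XSHM_A_CHANNEL_SIZE, 0x10000);
+ *     nla_put_u32(msg, XSHM_A_CHANNEL_BUFFERS, 6);
+ *     nla_put_u8(msg, XSHM_A_ALIGNMENT, 4);
+ *     nla_put_u16(msg, XSHM_A_MTU, 1500);
+ *     nla_nest_end(msg, rx);
+ */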
+
+#endif /* XSHM_NL_H_ */
diff --git a/include/linux/xshm/xshm_pdev.h b/include/linux/xshm/xshm_pdev.h
new file mode 100644 (file)
index 0000000..0c223fb
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef XSHM_PDEV_H_
+#define XSHM_PDEV_H_
+#include <linux/platform_device.h>
+
+#define XSHM_NAMESZ 16
+
+/**
+ * struct xshm_udchannel - Unidirectional channel for xshm driver.
+ *
+ * @addr: Base address of the channel, address must be
+ *               a kernel logical address.
+ * @buffers: The number of buffers in the channel.
+ * @ch_size: The size of data area for the channel in one direction.
+ * @xfer_bit: GENI/O bit used when sending data (write pointer move)
+ * @xfer_done_bit: GENI/O bit used to indicate available buffers
+ *     (read pointer move).
+ * @alignment: Alignment used in payload protocol.
+ * @mtu: Maximum Transfer Unit used for packets in a buffer (packet mode).
+ * @packets: Maximum number of packets in a buffer (packet mode).
+ * @state: State of the device: 0 - Closed, 1 - Open.
+ * @read: Specify the read index for a channel. In packet mode
+ *     this index will at any time refer to the next buffer available for read.
+ *     In stream mode, this will be the read index in the ring-buffer.
+ * @write: Specify the write index for a channel. In packet mode
+ *     this index will at any time refer to the next buffer available for
+ *     write. In stream mode, this will be the write index in the ring-buffer.
+ * @buf_size: In packet mode, this array contains the size of each buffer.
+ *     In stream mode this is unused.
+ *
+ * This external shared memory channel configuration is exported from the
+ * platform device. It gives the platform driver the
+ * necessary information for running the shared memory protocol
+ * between modem and host.
+ *
+ * Note that two instances of this configuration are needed in order to run a
+ * bi-directional channel.
+ */
+struct xshm_udchannel {
+       void *addr;
+       u32 buffers;
+       u32 ch_size;
+       u8 xfer_done_bit;
+       u8 xfer_bit;
+       u32 mtu;
+       u32 alignment;
+       u32 packets;
+       __le32 *state;
+       __le32 *read;
+       __le32 *write;
+       __le32 *buf_size;
+/* private: */
+       struct kobject kobj; /* kobj must be located at the end */
+};
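+
+/*
+ * Illustrative sketch (stream mode): assuming ch_size is a power of two
+ * and sz = ch_size - 1, as the xshm character driver does, the space
+ * available for writing at write index wi would be:
+ *
+ *     avail = (le32_to_cpu(*ch->read) - wi - 1) & sz;
+ */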
+
+/**
+ * struct xshm_channel - Channel definition for xshm driver.
+ * @rx: Configuration for RX channel
+ * @tx: Configuration for TX channel
+ * @excl_group: Only channels with the same group ID can be open simultaneously.
+ * @mode: Type of channel: PACKET (1) or STREAM (2).
+ * @name: Name of interface.
+ * @priority: Priority of the channel.
+ * @latency: Latency of the channel.
+ */
+struct xshm_channel {
+       struct xshm_udchannel rx, tx;
+       u32 excl_group;
+       u32 mode;
+       char name[XSHM_NAMESZ];
+       u32 priority;
+       u32 latency;
+};
+
+#define XSHM_OPEN   1
+#define XSHM_CLOSED 0
+
+enum xshm_dev_state {
+       XSHM_DEV_CLOSED = 0,
+       XSHM_DEV_OPENING,
+       XSHM_DEV_OPEN,
+       XSHM_DEV_ACTIVE,
+};
+
+/**
+ * struct xshm_dev - Device definition for xshm platform device.
+ *
+ * @pdev: Platform device
+ * @cfg: Configuration for the Channel
+ * @state: State of the device: Closed - No user space client is using it,
+ *     Open - Open but no payload queued, Active - Payload queued on device.
+ *
+ * @open: The driver calls open() when the channel is taken into use.
+ *     This function will fail if channel configuration is inconsistent,
+ *     or upon resource conflicts with other channels.
+ *
+ * @open_cb: The device calls open_cb() when it is ready for use.
+ *
+ * @close: Called by the driver when a channel is no longer in use.
+ *
+ * @close_cb: The device calls close_cb() to notify about remote side closure.
+ *
+ * @ipc_tx_release_cb: This callback is triggered by the modem when a
+ *     transmit operation has completed and the buffer can be reused.
+ *     This function must be set by the driver upon device registration.
+ *     The "more" flag is set if ipc_rx_cb() call is coming immediately
+ *     after this call to ipc_tx_release_cb().
+ *
+ * @ipc_rx_cb: The driver gets this callback when the modem sends a buffer.
+ *     The driver must call ipc_rx_release()
+ *     to make the buffer available again when the received buffer has been
+ *     processed.
+ *     This function pointer must be set by the driver upon device
+ *     registration.
+ *
+ * @ipc_rx_release: Called by the driver when an RX operation has completed
+ *     and the rx-buffer can be released.
+ *
+ * @ipc_tx: Called by the driver when a TX buffer shall be sent to the modem.
+ *
+ * @driver_data: pointer to driver specific data.
+ *
+ * When communicating between two systems (e.g. modem and host),
+ * external shared memory can be used (e.g. C2C or DPRAM).
+ *
+ * This structure is used by the platform device representing the
+ * External Shared Memory.
+ *
+ * This structure contains configuration data for the platform device and
+ * function pointers for IPC communication between the Linux host and modem.
+ * The external shared memory (e.g. C2C or DPRAM) is used together with an
+ * IPC mechanism for transporting small commands, such as Mailbox or GENI/O.
+ *
+ * This data structure is initialized by the xshm platform device, except
+ * for the functions ipc_rx_cb() and ipc_tx_release_cb(). They must be set by
+ * the platform driver when the device is registered.
+ */
+
+struct xshm_dev {
+       struct platform_device pdev;
+       struct xshm_channel cfg;
+       enum xshm_dev_state state;
+       int (*open)(struct xshm_dev *dev);
+       void (*close)(struct xshm_dev *dev);
+       int (*ipc_rx_release)(struct xshm_dev *dev, bool more);
+       int (*ipc_tx)(struct xshm_dev *dev);
+       int (*open_cb)(void *drv);
+       void (*close_cb)(void *drv);
+       int (*ipc_rx_cb)(void *drv);
+       int (*ipc_tx_release_cb)(void *drv);
+       void *driver_data;
+       /* private: */
+       struct list_head node;
+       void *priv;
+};
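+
+/*
+ * Illustrative sketch of a platform driver binding to this device,
+ * following the xshm_chr probe path; the my_* names are hypothetical:
+ *
+ *     struct xshm_dev *xshm = pdev->dev.platform_data;
+ *
+ *     xshm->ipc_rx_cb = my_rx_cb;
+ *     xshm->ipc_tx_release_cb = my_tx_release_cb;
+ *     xshm->open_cb = my_open_cb;
+ *     xshm->close_cb = my_close_cb;
+ *     xshm->driver_data = my_dev;
+ *     err = xshm->open(xshm);
+ */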
+
+/**
+ * xshm_register_dev() - Register an instance of the xshm platform device.
+ * @shmdev: Device configuration data.
+ */
+int xshm_register_dev(struct xshm_dev *shmdev);
+
+/**
+ * xshm_ipc_ready() - Notify that the GENO bit READY_FOR_IPC is set.
+ */
+void xshm_ipc_ready(void);
+
+/**
+ * xshm_caif_ready() - Notify that the GENO bit READY_FOR_CAIF is set.
+ */
+void xshm_caif_ready(void);
+
+extern bool ready_for_ipc;
+extern bool ready_for_caif;
+void xshm_put(struct xshm_dev *shmdev);
+struct xshm_dev *xshmdev_alloc(void);
+void xshmdev_free(struct xshm_dev *dev);
+void remove_devices(void);
+void close_devices(void);
+void xshm_boot_exit(void);
+int xshm_boot_init(void);
+void xshm_reset(void);
+void genio_ipc_ready_cb(void);
+#endif