Merge remote-tracking branch 'rdma/for-next'
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index d3d4454ffc84c6603397976e8c027b62c3300fbe..b09a40c1adceebf170617da3826c82002b04807a 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -47,7 +47,6 @@ struct rds_iw_mr {
        struct rdma_cm_id       *cm_id;
 
        struct ib_mr    *mr;
-       struct ib_fast_reg_page_list *page_list;
 
        struct rds_iw_mapping   mapping;
        unsigned char           remap_count;
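
Note: the page_list member can go away because the new registration API moved
ownership of the page vector into struct ib_mr itself: ib_alloc_mr() sizes it
at allocation time and ib_map_mr_sg_zbva() fills it. A minimal sketch of the
allocation side (pd and max_pages stand in for the pool's values):

    struct ib_mr *mr;

    /* one allocation now covers the MR and its internal page list;
     * this replaces ib_alloc_fast_reg_mr() + ib_alloc_fast_reg_page_list() */
    mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_pages);
    if (IS_ERR(mr))
            return PTR_ERR(mr);
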
@@ -77,8 +76,8 @@ struct rds_iw_mr_pool {
 
 static void rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
 static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
-static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
-static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
+static int rds_iw_init_reg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
+static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
                          struct rds_iw_mr *ibmr,
                          struct scatterlist *sg, unsigned int nents);
 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
@@ -258,19 +257,18 @@ static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
        sg->bytes = 0;
 }
 
-static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
-                       struct rds_iw_scatterlist *sg)
+static int rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
+                                 struct rds_iw_scatterlist *sg)
 {
        struct ib_device *dev = rds_iwdev->dev;
-       u64 *dma_pages = NULL;
-       int i, j, ret;
+       int i, ret;
 
        WARN_ON(sg->dma_len);
 
        sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
        if (unlikely(!sg->dma_len)) {
                printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
-               return ERR_PTR(-EBUSY);
+               return -EBUSY;
        }
 
        sg->bytes = 0;
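
Note: the return-type change is the usual kernel convention swap. A
pointer-returning function has to smuggle errno values through ERR_PTR() and
callers unpack them with IS_ERR()/PTR_ERR(); an int-returning function hands
back -errno directly. A generic sketch (the example_* names are illustrative
only):

    static u64 *example_old(void)
    {
            return ERR_PTR(-EBUSY); /* caller: if (IS_ERR(p)) err = PTR_ERR(p); */
    }

    static int example_new(void)
    {
            return -EBUSY;          /* caller: if (ret) goto out; */
    }
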
@@ -303,31 +301,14 @@ static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
        if (sg->dma_npages > fastreg_message_size)
                goto out_unmap;
 
-       dma_pages = kmalloc(sizeof(u64) * sg->dma_npages, GFP_ATOMIC);
-       if (!dma_pages) {
-               ret = -ENOMEM;
-               goto out_unmap;
-       }
 
-       for (i = j = 0; i < sg->dma_len; ++i) {
-               unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
-               u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
-               u64 end_addr;
 
-               end_addr = dma_addr + dma_len;
-               dma_addr &= ~PAGE_MASK;
-               for (; dma_addr < end_addr; dma_addr += PAGE_SIZE)
-                       dma_pages[j++] = dma_addr;
-               BUG_ON(j > sg->dma_npages);
-       }
-
-       return dma_pages;
+       return 0;
 
 out_unmap:
        ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
        sg->dma_len = 0;
-       kfree(dma_pages);
-       return ERR_PTR(ret);
+       return ret;
 }
 
 
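
Note: the deleted loop flattened each scatterlist entry into a u64 page array
for the old fast_reg page list. ib_map_mr_sg_zbva() now performs that walk
inside the IB core, storing the pages in the MR itself and zeroing the iova
("zbva" = zero-based virtual address, which iWARP requires). The helper is a
thin wrapper, roughly:

    static inline int ib_map_mr_sg_zbva(struct ib_mr *mr,
                                        struct scatterlist *sg,
                                        int sg_nents, unsigned int page_size)
    {
            int n;

            n = ib_map_mr_sg(mr, sg, sg_nents, page_size);
            mr->iova = 0;   /* zero-based VA */

            return n;       /* number of SG entries mapped, or -errno */
    }
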
@@ -440,7 +421,7 @@ static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
        INIT_LIST_HEAD(&ibmr->mapping.m_list);
        ibmr->mapping.m_mr = ibmr;
 
-       err = rds_iw_init_fastreg(pool, ibmr);
+       err = rds_iw_init_reg(pool, ibmr);
        if (err)
                goto out_no_cigar;
 
@@ -620,7 +601,7 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
        ibmr->cm_id = cm_id;
        ibmr->device = rds_iwdev;
 
-       ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
+       ret = rds_iw_map_reg(rds_iwdev->mr_pool, ibmr, sg, nents);
        if (ret == 0)
                *key_ret = ibmr->mr->rkey;
        else
@@ -636,7 +617,7 @@ out:
 }
 
 /*
- * iWARP fastreg handling
+ * iWARP reg handling
  *
  * The life cycle of a fastreg registration is a bit different from that of
  * FMRs.
@@ -648,7 +629,7 @@ out:
  * This creates a bit of a problem for us, as we do not have the destination
  * IP in GET_MR, so the connection must be set up prior to the GET_MR call for
  * RDMA to be correctly set up.  If a fastreg request is present, rds_iw_xmit
- * will try to queue a LOCAL_INV (if needed) and a FAST_REG_MR work request
+ * will try to queue a LOCAL_INV (if needed) and a REG_MR work request
  * before queuing the SEND. When completions for these arrive, they are
  * dispatched to the MR, which has a bit set showing that RDMA can be performed.
  *
@@ -657,11 +638,10 @@ out:
  * The expectation there is that this invalidation step includes ALL
  * PREVIOUSLY FREED MRs.
  */
-static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
-                               struct rds_iw_mr *ibmr)
+static int rds_iw_init_reg(struct rds_iw_mr_pool *pool,
+                          struct rds_iw_mr *ibmr)
 {
        struct rds_iw_device *rds_iwdev = pool->device;
-       struct ib_fast_reg_page_list *page_list = NULL;
        struct ib_mr *mr;
        int err;
 
@@ -674,55 +654,44 @@ static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
                return err;
        }
 
-       /* FIXME - this is overkill, but mapping->m_sg.dma_len/mapping->m_sg.dma_npages
-        * is not filled in.
-        */
-       page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
-       if (IS_ERR(page_list)) {
-               err = PTR_ERR(page_list);
-
-               printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed (err=%d)\n", err);
-               ib_dereg_mr(mr);
-               return err;
-       }
-
-       ibmr->page_list = page_list;
        ibmr->mr = mr;
        return 0;
 }
 
-static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
+static int rds_iw_rdma_reg_mr(struct rds_iw_mapping *mapping)
 {
        struct rds_iw_mr *ibmr = mapping->m_mr;
-       struct ib_send_wr f_wr, *failed_wr;
-       int ret;
+       struct rds_iw_scatterlist *m_sg = &mapping->m_sg;
+       struct ib_reg_wr reg_wr;
+       struct ib_send_wr *failed_wr;
+       int ret, n;
+
+       n = ib_map_mr_sg_zbva(ibmr->mr, m_sg->list, m_sg->len, PAGE_SIZE);
+       if (unlikely(n != m_sg->len))
+               return n < 0 ? n : -EINVAL;
+
+       reg_wr.wr.next = NULL;
+       reg_wr.wr.opcode = IB_WR_REG_MR;
+       reg_wr.wr.wr_id = RDS_IW_REG_WR_ID;
+       reg_wr.wr.num_sge = 0;
+       reg_wr.mr = ibmr->mr;
+       reg_wr.key = mapping->m_rkey;
+       reg_wr.access = IB_ACCESS_LOCAL_WRITE |
+                       IB_ACCESS_REMOTE_READ |
+                       IB_ACCESS_REMOTE_WRITE;
 
        /*
-        * Perform a WR for the fast_reg_mr. Each individual page
-        * in the sg list is added to the fast reg page list and placed
-        * inside the fast_reg_mr WR.  The key used is a rolling 8bit
-        * counter, which should guarantee uniqueness.
+        * Post a REG_MR work request for the MR. The pages in the sg
+        * list were already attached to the MR by ib_map_mr_sg_zbva()
+        * above.  The key used is a rolling 8-bit counter, which should
+        * guarantee uniqueness.
         */
        ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
        mapping->m_rkey = ibmr->mr->rkey;
 
-       memset(&f_wr, 0, sizeof(f_wr));
-       f_wr.wr_id = RDS_IW_FAST_REG_WR_ID;
-       f_wr.opcode = IB_WR_FAST_REG_MR;
-       f_wr.wr.fast_reg.length = mapping->m_sg.bytes;
-       f_wr.wr.fast_reg.rkey = mapping->m_rkey;
-       f_wr.wr.fast_reg.page_list = ibmr->page_list;
-       f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
-       f_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-       f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
-                               IB_ACCESS_REMOTE_READ |
-                               IB_ACCESS_REMOTE_WRITE;
-       f_wr.wr.fast_reg.iova_start = 0;
-       f_wr.send_flags = IB_SEND_SIGNALED;
-
-       failed_wr = &f_wr;
-       ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
-       BUG_ON(failed_wr != &f_wr);
+       failed_wr = &reg_wr.wr;
+       ret = ib_post_send(ibmr->cm_id->qp, &reg_wr.wr, &failed_wr);
+       BUG_ON(failed_wr != &reg_wr.wr);
        if (ret)
                printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
                        __func__, __LINE__, ret);
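
Note: the "rolling 8-bit counter" works because the low byte of an l/rkey is
software-owned. ib_update_fast_reg_key() only replaces that byte, so every
reuse of the MR gets a fresh key while the hardware MR index in the upper
24 bits stays fixed; roughly, from include/rdma/ib_verbs.h:

    static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
    {
            mr->lkey = (mr->lkey & 0xffffff00) | newkey;
            mr->rkey = (mr->rkey & 0xffffff00) | newkey;
    }

A stale rkey from an earlier registration therefore faults instead of silently
reaching remapped memory.
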
@@ -754,21 +723,20 @@ out:
        return ret;
 }
 
-static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
-                       struct rds_iw_mr *ibmr,
-                       struct scatterlist *sg,
-                       unsigned int sg_len)
+static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
+                         struct rds_iw_mr *ibmr,
+                         struct scatterlist *sg,
+                         unsigned int sg_len)
 {
        struct rds_iw_device *rds_iwdev = pool->device;
        struct rds_iw_mapping *mapping = &ibmr->mapping;
        u64 *dma_pages;
-       int i, ret = 0;
+       int ret = 0;
 
        rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);
 
-       dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
-       if (IS_ERR(dma_pages)) {
-               ret = PTR_ERR(dma_pages);
+       ret = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
+       if (ret) {
                dma_pages = NULL;
                goto out;
        }
@@ -778,10 +746,7 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
                goto out;
        }
 
-       for (i = 0; i < mapping->m_sg.dma_npages; ++i)
-               ibmr->page_list->page_list[i] = dma_pages[i];
-
-       ret = rds_iw_rdma_build_fastreg(mapping);
+       ret = rds_iw_rdma_reg_mr(mapping);
        if (ret)
                goto out;
 
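
Note: with the ERR_PTR plumbing gone, the mapping path is plain int
propagation end to end. Condensed (hypothetical, not literal kernel code):

    /* set up sg -> DMA-map -> post IB_WR_REG_MR */
    rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);
    ret = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
    if (!ret)
            ret = rds_iw_rdma_reg_mr(mapping);  /* map pages + post WR */
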
@@ -867,8 +832,6 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
                struct rds_iw_mr *ibmr)
 {
-       if (ibmr->page_list)
-               ib_free_fast_reg_page_list(ibmr->page_list);
        if (ibmr->mr)
                ib_dereg_mr(ibmr->mr);
 }