/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds.h"
#include "ib.h"

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
        struct rds_ib_device    *device;
        struct rds_ib_mr_pool   *pool;
        struct ib_fmr           *fmr;

        struct llist_node       llnode;

        /* unmap_list is for freeing */
        struct list_head        unmap_list;
        unsigned int            remap_count;

        struct scatterlist      *sg;
        unsigned int            sg_len;
        u64                     *dma;
        int                     sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
        unsigned int            pool_type;
        struct mutex            flush_lock;             /* serialize fmr invalidate */
        struct delayed_work     flush_worker;           /* flush worker */

        atomic_t                item_count;             /* total # of MRs */
        atomic_t                dirty_count;            /* # of dirty MRs */

        struct llist_head       drop_list;              /* MRs that have reached their max_maps limit */
        struct llist_head       free_list;              /* unused MRs */
        struct llist_head       clean_list;             /* global unused & unmapped MRs */
        wait_queue_head_t       flush_wait;

        atomic_t                free_pinned;            /* memory pinned by free MRs */
        unsigned long           max_items;
        unsigned long           max_items_soft;
        unsigned long           max_free_pinned;
        struct ib_fmr_attr      fmr_attr;
};

static struct workqueue_struct *rds_ib_fmr_wq;

int rds_ib_fmr_init(void)
{
        rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
        if (!rds_ib_fmr_wq)
                return -ENOMEM;
        return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed.  As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_fmr_exit(void)
{
        destroy_workqueue(rds_ib_fmr_wq);
}

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

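/*
 * Look up the rds_ib_device that claims @ipaddr, under RCU, and take a
 * reference on it.  Returns NULL if no device owns the address.
 */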
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_ipaddr *i_ipaddr;

        rcu_read_lock();
        list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
                list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                        if (i_ipaddr->ipaddr == ipaddr) {
                                atomic_inc(&rds_ibdev->refcount);
                                rcu_read_unlock();
                                return rds_ibdev;
                        }
                }
        }
        rcu_read_unlock();

        return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;

        i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
        if (!i_ipaddr)
                return -ENOMEM;

        i_ipaddr->ipaddr = ipaddr;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;
        struct rds_ib_ipaddr *to_free = NULL;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                if (i_ipaddr->ipaddr == ipaddr) {
                        list_del_rcu(&i_ipaddr->list);
                        to_free = i_ipaddr;
                        break;
                }
        }
        spin_unlock_irq(&rds_ibdev->spinlock);

        if (to_free)
                kfree_rcu(to_free, rcu);
}

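/*
 * Associate @ipaddr with @rds_ibdev, dropping it from whichever device
 * previously claimed it.
 */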
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev_old;

        rds_ibdev_old = rds_ib_get_device(ipaddr);
        if (!rds_ibdev_old)
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr);

        if (rds_ibdev_old != rds_ibdev) {
                rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
                rds_ib_dev_put(rds_ibdev_old);
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
        }
        rds_ib_dev_put(rds_ibdev_old);

        return 0;
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&ib_nodev_conns_lock);
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);

        spin_lock(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock(&rds_ibdev->spinlock);
        spin_unlock_irq(&ib_nodev_conns_lock);

        ic->rds_ibdev = rds_ibdev;
        atomic_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&ib_nodev_conns_lock);

        spin_lock_irq(&rds_ibdev->spinlock);
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
        spin_unlock_irq(&rds_ibdev->spinlock);

        list_add_tail(&ic->ib_node, &ib_nodev_conns);

        spin_unlock(&ib_nodev_conns_lock);

        ic->rds_ibdev = NULL;
        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&ib_nodev_conns_lock);
        list_splice(&ib_nodev_conns, &tmp_list);
        spin_unlock_irq(&ib_nodev_conns_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
                rds_conn_destroy(ic->conn);
}

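/*
 * Allocate and initialise an FMR pool of the given type (8K or 1M
 * message size), sizing its page and item limits from the pool type
 * and the device's FMR capabilities.
 */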
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
                                             int pool_type)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        pool->pool_type = pool_type;
        init_llist_head(&pool->free_list);
        init_llist_head(&pool->drop_list);
        init_llist_head(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        init_waitqueue_head(&pool->flush_wait);
        INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        if (pool_type == RDS_IB_MR_1M_POOL) {
                /* +1 allows for unaligned MRs */
                pool->fmr_attr.max_pages = RDS_FMR_1M_MSG_SIZE + 1;
                pool->max_items = RDS_FMR_1M_POOL_SIZE;
        } else {
                /* pool_type == RDS_IB_MR_8K_POOL */
                pool->fmr_attr.max_pages = RDS_FMR_8K_MSG_SIZE + 1;
                pool->max_items = RDS_FMR_8K_POOL_SIZE;
        }

        pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
        pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;

        return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

        iinfo->rdma_mr_max = pool_1m->max_items;
        iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        cancel_delayed_work_sync(&pool->flush_worker);
        rds_ib_flush_mr_pool(pool, 1, NULL);
        WARN_ON(atomic_read(&pool->item_count));
        WARN_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

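/*
 * Pop an unused MR off the clean list.  The per-CPU busy bit lets
 * rds_ib_flush_mr_pool() wait for a grace period before it splices
 * entries back onto the clean list.
 */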
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        struct llist_node *ret;
        unsigned long *flag;

        preempt_disable();
        flag = this_cpu_ptr(&clean_list_grace);
        set_bit(CLEAN_LIST_BUSY_BIT, flag);
        ret = llist_del_first(&pool->clean_list);
        if (ret)
                ibmr = llist_entry(ret, struct rds_ib_mr, llnode);

        clear_bit(CLEAN_LIST_BUSY_BIT, flag);
        preempt_enable();
        return ibmr;
}

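/*
 * Wait until no CPU is inside the clean-list critical section above so
 * that MRs can safely be added back onto the clean list.
 */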
static inline void wait_clean_list_grace(void)
{
        int cpu;
        unsigned long *flag;

        for_each_online_cpu(cpu) {
                flag = &per_cpu(clean_list_grace, cpu);
                while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
                        cpu_relax();
        }
}

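/*
 * Get an MR for @npages pages, preferring a reusable one from the
 * pool's clean list and allocating a fresh FMR only while the pool is
 * below its item limit.
 */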
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev,
                                          int npages)
{
        struct rds_ib_mr_pool *pool;
        struct rds_ib_mr *ibmr = NULL;
        int err = 0, iter = 0;

        if (npages <= RDS_FMR_8K_MSG_SIZE)
                pool = rds_ibdev->mr_8k_pool;
        else
                pool = rds_ibdev->mr_1m_pool;

        if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

        /* Switch pools if one of the pools is reaching its upper limit */
        if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        pool = rds_ibdev->mr_1m_pool;
                else
                        pool = rds_ibdev->mr_8k_pool;
        }

        while (1) {
                ibmr = rds_ib_reuse_fmr(pool);
                if (ibmr)
                        return ibmr;

                /* No clean MRs - now we have the choice of either
                 * allocating a fresh MR up to the limit imposed by the
                 * driver, or flushing any dirty unused MRs.
                 * We try to avoid stalling in the send path if possible,
                 * so we allocate as long as we're allowed to.
                 *
                 * We're fussy with enforcing the FMR limit, though. If the driver
                 * tells us we can't use more than N fmrs, we shouldn't start
                 * arguing with it */
                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                                rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
                        else
                                rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
                else
                        rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);
                rds_ib_flush_mr_pool(pool, 0, &ibmr);
                if (ibmr)
                        return ibmr;
        }

        ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
        if (!ibmr) {
                err = -ENOMEM;
                goto out_no_cigar;
        }

        ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
                        (IB_ACCESS_LOCAL_WRITE |
                         IB_ACCESS_REMOTE_READ |
                         IB_ACCESS_REMOTE_WRITE |
                         IB_ACCESS_REMOTE_ATOMIC),
                        &pool->fmr_attr);
        if (IS_ERR(ibmr->fmr)) {
                err = PTR_ERR(ibmr->fmr);
                ibmr->fmr = NULL;
                printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
                goto out_no_cigar;
        }

        ibmr->pool = pool;
        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
        else
                rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

        return ibmr;

out_no_cigar:
        if (ibmr) {
                if (ibmr->fmr)
                        ib_dealloc_fmr(ibmr->fmr);
                kfree(ibmr);
        }
        atomic_dec(&pool->item_count);
        return ERR_PTR(err);
}

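/*
 * Build the flat page list for the scatterlist and hand it to
 * ib_map_phys_fmr().  Only the first entry may start, and only the
 * last entry may end, off a page boundary.
 */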
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
               struct scatterlist *sg, unsigned int nents)
{
        struct ib_device *dev = rds_ibdev->dev;
        struct scatterlist *scat = sg;
        u64 io_addr = 0;
        u64 *dma_pages;
        u32 len;
        int page_cnt, sg_dma_len;
        int i, j;
        int ret;

        sg_dma_len = ib_dma_map_sg(dev, sg, nents,
                                 DMA_BIDIRECTIONAL);
        if (unlikely(!sg_dma_len)) {
                printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
                return -EBUSY;
        }

        len = 0;
        page_cnt = 0;

        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                if (dma_addr & ~PAGE_MASK) {
                        if (i > 0)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }
                if ((dma_addr + dma_len) & ~PAGE_MASK) {
                        if (i < sg_dma_len - 1)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }

                len += dma_len;
        }

        page_cnt += len >> PAGE_SHIFT;
        if (page_cnt > ibmr->pool->fmr_attr.max_pages)
                return -EINVAL;

        dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
                                 rdsibdev_to_node(rds_ibdev));
        if (!dma_pages)
                return -ENOMEM;

        page_cnt = 0;
        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                for (j = 0; j < dma_len; j += PAGE_SIZE)
                        dma_pages[page_cnt++] =
                                (dma_addr & PAGE_MASK) + j;
        }

        ret = ib_map_phys_fmr(ibmr->fmr,
                                   dma_pages, page_cnt, io_addr);
        if (ret)
                goto out;

        /* Success - we successfully remapped the MR, so we can
         * safely tear down the old mapping. */
        rds_ib_teardown_mr(ibmr);

        ibmr->sg = scat;
        ibmr->sg_len = nents;
        ibmr->sg_dma_len = sg_dma_len;
        ibmr->remap_count++;

        if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
                rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
        else
                rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
        ret = 0;

out:
        kfree(dma_pages);

        return ret;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_device *rds_ibdev = ibmr->device;

        if (ibmr->sg_dma_len) {
                ib_dma_unmap_sg(rds_ibdev->dev,
                                ibmr->sg, ibmr->sg_len,
                                DMA_BIDIRECTIONAL);
                ibmr->sg_dma_len = 0;
        }

        /* Release the s/g list */
        if (ibmr->sg_len) {
                unsigned int i;

                for (i = 0; i < ibmr->sg_len; ++i) {
                        struct page *page = sg_page(&ibmr->sg[i]);

                        /* FIXME we need a way to tell a r/w MR
                         * from a r/o MR */
                        WARN_ON(!page->mapping && irqs_disabled());
                        set_page_dirty(page);
                        put_page(page);
                }
                kfree(ibmr->sg);

                ibmr->sg = NULL;
                ibmr->sg_len = 0;
        }
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);
        if (pinned) {
                struct rds_ib_mr_pool *pool = ibmr->pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
                                         struct list_head *list)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *node;
        struct llist_node *next;
        unsigned int count = 0;

        node = llist_del_all(llist);
        while (node) {
                next = node->next;
                ibmr = llist_entry(node, struct rds_ib_mr, llnode);
                list_add_tail(&ibmr->unmap_list, list);
                node = next;
                count++;
        }
        return count;
}

/*
 * this takes a list head of mrs and turns it into a chain of linked
 * llist nodes, returning the head and tail so the chain can be spliced
 * onto the pool's clean list.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
                                struct list_head *list,
                                struct llist_node **nodes_head,
                                struct llist_node **nodes_tail)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *cur = NULL;
        struct llist_node **next = nodes_head;

        list_for_each_entry(ibmr, list, unmap_list) {
                cur = &ibmr->llnode;
                *next = cur;
                next = &cur->next;
        }
        *next = NULL;
        *nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                                int free_all, struct rds_ib_mr **ibmr_ret)
{
        struct rds_ib_mr *ibmr, *next;
        struct llist_node *clean_nodes;
        struct llist_node *clean_tail;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);
        unsigned long unpinned = 0;
        unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
        int ret = 0;

        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
        else
                rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

        if (ibmr_ret) {
                DEFINE_WAIT(wait);
                while (!mutex_trylock(&pool->flush_lock)) {
                        ibmr = rds_ib_reuse_fmr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }

                        prepare_to_wait(&pool->flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (llist_empty(&pool->clean_list))
                                schedule();

                        ibmr = rds_ib_reuse_fmr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }
                }
                finish_wait(&pool->flush_wait, &wait);
        } else
                mutex_lock(&pool->flush_lock);

        if (ibmr_ret) {
                ibmr = rds_ib_reuse_fmr(pool);
                if (ibmr) {
                        *ibmr_ret = ibmr;
                        goto out;
                }
        }

        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list.
         */
        dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
        dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
        if (free_all)
                llist_append_to_list(&pool->clean_list, &unmap_list);

        free_goal = rds_ib_flush_goal(pool, free_all);

        if (list_empty(&unmap_list))
                goto out;

        /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
        list_for_each_entry(ibmr, &unmap_list, unmap_list)
                list_add(&ibmr->fmr->list, &fmr_list);

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

        /* Now we can destroy the DMA mapping and unpin any pages */
        list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
                unpinned += ibmr->sg_len;
                __rds_ib_teardown_mr(ibmr);
                if (nfreed < free_goal ||
                    ibmr->remap_count >= pool->fmr_attr.max_maps) {
                        if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
                                rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
                        else
                                rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
                        list_del(&ibmr->unmap_list);
                        ib_dealloc_fmr(ibmr->fmr);
                        kfree(ibmr);
                        nfreed++;
                }
        }

        if (!list_empty(&unmap_list)) {
                /* we have to make sure that none of the things we're about
                 * to put on the clean list would race with other cpus trying
                 * to pull items off.  The llist would explode if we managed to
                 * remove something from the clean list and then add it back again
                 * while another CPU was spinning on that same item in llist_del_first.
                 *
                 * This is pretty unlikely, but just in case, wait for an llist
                 * grace period here before adding anything back into the clean list.
                 */
                wait_clean_list_grace();

                list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
                if (ibmr_ret)
                        *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

                /* more than one entry in llist nodes */
                if (clean_nodes->next)
                        llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);

        }

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(dirty_to_clean, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        if (waitqueue_active(&pool->flush_wait))
                wake_up(&pool->flush_wait);
out_nolock:
        return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

        rds_ib_flush_mr_pool(pool, 0, NULL);
}

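/*
 * Called when the caller is done with an MR: put it back on the pool's
 * free or drop list and kick the flush worker if too many pages are
 * pinned by unused MRs.
 */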
void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_mr_pool *pool = ibmr->pool;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

        /* Return it to the pool's free list, or the drop list if it
         * has hit its remap limit */
        if (ibmr->remap_count >= pool->fmr_attr.max_maps)
                llist_add(&ibmr->llnode, &pool->drop_list);
        else
                llist_add(&ibmr->llnode, &pool->free_list);

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 5)
                queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0, NULL);
                } else {
                        /* We get here if the user created an MR marked
                         * as use_once and invalidate at the same time.
                         */
                        queue_delayed_work(rds_ib_fmr_wq,
                                           &pool->flush_worker, 10);
                }
        }

        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
        struct rds_ib_device *rds_ibdev;

        down_read(&rds_ib_devices_lock);
        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                if (rds_ibdev->mr_8k_pool)
                        rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

                if (rds_ibdev->mr_1m_pool)
                        rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
        }
        up_read(&rds_ib_devices_lock);
}

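/*
 * Map @sg for RDMA on behalf of @rs and return the MR handle, storing
 * the rkey in @key_ret on success.
 */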
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
        int ret;

        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
        if (!rds_ibdev) {
                ret = -ENODEV;
                goto out;
        }

        if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
                ret = -ENODEV;
                goto out;
        }

        ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
        if (IS_ERR(ibmr)) {
                rds_ib_dev_put(rds_ibdev);
                return ibmr;
        }

        ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
        if (ret == 0)
                *key_ret = ibmr->fmr->rkey;
        else
                printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

        ibmr->device = rds_ibdev;
        rds_ibdev = NULL;

 out:
        if (ret) {
                if (ibmr)
                        rds_ib_free_mr(ibmr, 0);
                ibmr = ERR_PTR(ret);
        }
        if (rds_ibdev)
                rds_ib_dev_put(rds_ibdev);
        return ibmr;
}