drivers/infiniband/ulp/srp/ib_srp.c
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME        "ib_srp"
#define PFX             DRV_NAME ": "
#define DRV_VERSION     "2.0"
#define DRV_RELDATE     "July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
                 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
                 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
                  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
                 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
                 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
                 "Number of seconds between the observation of a transport"
                 " layer error and failing all I/O. \"off\" means that this"
                 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
                 "Maximum number of seconds that the SRP transport should"
                 " insulate transport layer errors. After this time has been"
                 " exceeded the SCSI host is removed. Should be"
                 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                 " if fast_io_fail_tmo has not been set. \"off\" means that"
                 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
                 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
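
/*
 * Illustrative sketch only (not part of the driver): the default described
 * above could be computed along these lines, assuming num_online_nodes()
 * stands in for the number of CPU sockets and ibdev->num_comp_vectors holds
 * the number of completion vectors the HCA supports:
 *
 *	ch_count = min(4 * num_online_nodes(), ibdev->num_comp_vectors);
 */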

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
                const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
        .name   = "srp",
        .add    = srp_add_one,
        .remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
        int tmo = *(int *)kp->arg;

        if (tmo >= 0)
                return sprintf(buffer, "%d", tmo);
        else
                return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
        int tmo, res;

        res = srp_parse_tmo(&tmo, val);
        if (res)
                goto out;

        if (kp->arg == &srp_reconnect_delay)
                res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
                                    srp_dev_loss_tmo);
        else if (kp->arg == &srp_fast_io_fail_tmo)
                res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
        else
                res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
                                    tmo);
        if (res)
                goto out;
        *(int *)kp->arg = tmo;

out:
        return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
        .get = srp_tmo_get,
        .set = srp_tmo_set,
};
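
/*
 * Usage sketch (illustrative, assuming the module is loaded as "ib_srp"):
 * because reconnect_delay, fast_io_fail_tmo and dev_loss_tmo are wired up
 * through srp_tmo_ops, writes such as
 *
 *	echo 20  > /sys/module/ib_srp/parameters/reconnect_delay
 *	echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *
 * are validated by srp_tmo_set() against the other two timeouts, and reads
 * are formatted by srp_tmo_get() ("off" for negative values).
 */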

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
        return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
        return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
        static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
        static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

        return topspin_workarounds &&
                (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
                 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
                                    direction);
        if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
                goto out_free_buf;

        iu->size      = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
        if (!iu)
                return;

        ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
                            iu->direction);
        kfree(iu->buf);
        kfree(iu);
}
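
/*
 * Lifetime sketch (illustrative): an information unit is a DMA-mapped
 * buffer, so allocation and teardown always pair up, e.g.
 *
 *	struct srp_iu *iu = srp_alloc_iu(host, len, GFP_KERNEL,
 *					 DMA_FROM_DEVICE);
 *	if (iu) {
 *		... post iu->dma as a receive buffer ...
 *		srp_free_iu(host, iu);
 *	}
 */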

static void srp_qp_event(struct ib_event *event, void *context)
{
        pr_debug("QP event %s (%d)\n",
                 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
                       struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
                                  target->srp_host->port,
                                  be16_to_cpu(target->pkey),
                                  &attr->pkey_index);
        if (ret)
                goto out;

        attr->qp_state        = IB_QPS_INIT;
        attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
                                    IB_ACCESS_REMOTE_WRITE);
        attr->port_num        = target->srp_host->port;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE          |
                           IB_QP_PKEY_INDEX     |
                           IB_QP_ACCESS_FLAGS   |
                           IB_QP_PORT);

out:
        kfree(attr);
        return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct ib_cm_id *new_cm_id;

        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_cm_handler, ch);
        if (IS_ERR(new_cm_id))
                return PTR_ERR(new_cm_id);

        if (ch->cm_id)
                ib_destroy_cm_id(ch->cm_id);
        ch->cm_id = new_cm_id;
        ch->path.sgid = target->sgid;
        ch->path.dgid = target->orig_dgid;
        ch->path.pkey = target->pkey;
        ch->path.service_id = target->service_id;

        return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_fmr_pool_param fmr_param;

        memset(&fmr_param, 0, sizeof(fmr_param));
        fmr_param.pool_size         = target->mr_pool_size;
        fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
        fmr_param.cache             = 1;
        fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
        fmr_param.page_shift        = ilog2(dev->mr_page_size);
        fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
                                       IB_ACCESS_REMOTE_WRITE |
                                       IB_ACCESS_REMOTE_READ);

        return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
        int i;
        struct srp_fr_desc *d;

        if (!pool)
                return;

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                if (d->mr)
                        ib_dereg_mr(d->mr);
        }
        kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
                                              struct ib_pd *pd, int pool_size,
                                              int max_page_list_len)
{
        struct srp_fr_pool *pool;
        struct srp_fr_desc *d;
        struct ib_mr *mr;
        int i, ret = -EINVAL;

        if (pool_size <= 0)
                goto err;
        ret = -ENOMEM;
        pool = kzalloc(sizeof(struct srp_fr_pool) +
                       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
        if (!pool)
                goto err;
        pool->size = pool_size;
        pool->max_page_list_len = max_page_list_len;
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free_list);

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
                                 max_page_list_len);
                if (IS_ERR(mr)) {
                        ret = PTR_ERR(mr);
                        goto destroy_pool;
                }
                d->mr = mr;
                list_add_tail(&d->entry, &pool->free_list);
        }

out:
        return pool;

destroy_pool:
        srp_destroy_fr_pool(pool);

err:
        pool = ERR_PTR(ret);
        goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
        struct srp_fr_desc *d = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        if (!list_empty(&pool->free_list)) {
                d = list_first_entry(&pool->free_list, typeof(*d), entry);
                list_del(&d->entry);
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
                            int n)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&pool->lock, flags);
        for (i = 0; i < n; i++)
                list_add(&desc[i]->entry, &pool->free_list);
        spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;

        return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
                                  dev->max_pages_per_mr);
}
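
/*
 * Usage sketch (illustrative): a descriptor taken from the pool backs one
 * IB_WR_REG_MR work request and, per the note above srp_fr_pool_put(),
 * must have an invalidation queued before it is returned:
 *
 *	struct srp_fr_desc *d = srp_fr_pool_get(ch->fr_pool);
 *
 *	if (d) {
 *		... post IB_WR_REG_MR for d->mr ...
 *		... later, queue IB_WR_LOCAL_INV for d->mr->rkey ...
 *		srp_fr_pool_put(ch->fr_pool, &d, 1);
 *	}
 */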

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it. This prevents the receive completion
 * handler from accessing the queue pair while it is being destroyed.
 */
static void srp_destroy_qp(struct ib_qp *qp)
{
        ib_drain_rq(qp);
        ib_destroy_qp(qp);
}
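
/*
 * Aside (summary of the ib_drain_rq() core API, stated here for context):
 * draining moves the QP into the error state and waits for the completion
 * of a final marker work request, which is why the receive CQ below is
 * allocated with one entry more than queue_size.
 */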

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_qp_init_attr *init_attr;
        struct ib_cq *recv_cq, *send_cq;
        struct ib_qp *qp;
        struct ib_fmr_pool *fmr_pool = NULL;
        struct srp_fr_pool *fr_pool = NULL;
        const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
        int ret;

        init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
        if (!init_attr)
                return -ENOMEM;

        /* queue_size + 1 for ib_drain_rq() */
        recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
                                ch->comp_vector, IB_POLL_SOFTIRQ);
        if (IS_ERR(recv_cq)) {
                ret = PTR_ERR(recv_cq);
                goto err;
        }

        send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
                                ch->comp_vector, IB_POLL_DIRECT);
        if (IS_ERR(send_cq)) {
                ret = PTR_ERR(send_cq);
                goto err_recv_cq;
        }

        init_attr->event_handler       = srp_qp_event;
        init_attr->cap.max_send_wr     = m * target->queue_size;
        init_attr->cap.max_recv_wr     = target->queue_size + 1;
        init_attr->cap.max_recv_sge    = 1;
        init_attr->cap.max_send_sge    = 1;
        init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
        init_attr->qp_type             = IB_QPT_RC;
        init_attr->send_cq             = send_cq;
        init_attr->recv_cq             = recv_cq;

        qp = ib_create_qp(dev->pd, init_attr);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_send_cq;
        }

        ret = srp_init_qp(target, qp);
        if (ret)
                goto err_qp;

        if (dev->use_fast_reg) {
                fr_pool = srp_alloc_fr_pool(target);
                if (IS_ERR(fr_pool)) {
                        ret = PTR_ERR(fr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
        } else if (dev->use_fmr) {
                fmr_pool = srp_alloc_fmr_pool(target);
                if (IS_ERR(fmr_pool)) {
                        ret = PTR_ERR(fmr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FMR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
        }

        if (ch->qp)
                srp_destroy_qp(ch->qp);
        if (ch->recv_cq)
                ib_free_cq(ch->recv_cq);
        if (ch->send_cq)
                ib_free_cq(ch->send_cq);

        ch->qp = qp;
        ch->recv_cq = recv_cq;
        ch->send_cq = send_cq;

        if (dev->use_fast_reg) {
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
                ch->fr_pool = fr_pool;
        } else if (dev->use_fmr) {
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
                ch->fmr_pool = fmr_pool;
        }

        kfree(init_attr);
        return 0;

err_qp:
        srp_destroy_qp(qp);

err_send_cq:
        ib_free_cq(send_cq);

err_recv_cq:
        ib_free_cq(recv_cq);

err:
        kfree(init_attr);
        return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
                           struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        int i;

        if (!ch->target)
                return;

        if (ch->cm_id) {
                ib_destroy_cm_id(ch->cm_id);
                ch->cm_id = NULL;
        }

        /* Return if srp_new_cm_id() succeeded but srp_create_ch_ib() failed. */
        if (!ch->qp)
                return;

        if (dev->use_fast_reg) {
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
        } else if (dev->use_fmr) {
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
        }

        srp_destroy_qp(ch->qp);
        ib_free_cq(ch->send_cq);
        ib_free_cq(ch->recv_cq);

        /*
         * Prevent the SCSI error handler from using this channel after it
         * has been freed. The error handler may keep trying to perform
         * recovery actions after scsi_remove_host() has returned.
         */
        ch->target = NULL;

        ch->qp = NULL;
        ch->send_cq = ch->recv_cq = NULL;

        if (ch->rx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->rx_ring[i]);
                kfree(ch->rx_ring);
                ch->rx_ring = NULL;
        }
        if (ch->tx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->tx_ring[i]);
                kfree(ch->tx_ring);
                ch->tx_ring = NULL;
        }
}

static void srp_path_rec_completion(int status,
                                    struct ib_sa_path_rec *pathrec,
                                    void *ch_ptr)
{
        struct srp_rdma_ch *ch = ch_ptr;
        struct srp_target_port *target = ch->target;

        ch->status = status;
        if (status)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Got failed path rec status %d\n", status);
        else
                ch->path = *pathrec;
        complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        int ret;

        ch->path.numb_path = 1;

        init_completion(&ch->done);

        ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
                                               target->srp_host->srp_dev->dev,
                                               target->srp_host->port,
                                               &ch->path,
                                               IB_SA_PATH_REC_SERVICE_ID |
                                               IB_SA_PATH_REC_DGID       |
                                               IB_SA_PATH_REC_SGID       |
                                               IB_SA_PATH_REC_NUMB_PATH  |
                                               IB_SA_PATH_REC_PKEY,
                                               SRP_PATH_REC_TIMEOUT_MS,
                                               GFP_KERNEL,
                                               srp_path_rec_completion,
                                               ch, &ch->path_query);
        if (ch->path_query_id < 0)
                return ch->path_query_id;

        ret = wait_for_completion_interruptible(&ch->done);
        if (ret < 0)
                return ret;

        if (ch->status < 0)
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Path record query failed\n");

        return ch->status;
}

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        struct {
                struct ib_cm_req_param param;
                struct srp_login_req   priv;
        } *req = NULL;
        int status;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->param.primary_path               = &ch->path;
        req->param.alternate_path             = NULL;
        req->param.service_id                 = target->service_id;
        req->param.qp_num                     = ch->qp->qp_num;
        req->param.qp_type                    = ch->qp->qp_type;
        req->param.private_data               = &req->priv;
        req->param.private_data_len           = sizeof req->priv;
        req->param.flow_control               = 1;

        get_random_bytes(&req->param.starting_psn, 4);
        req->param.starting_psn              &= 0xffffff;

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req->param.responder_resources        = 4;
        req->param.remote_cm_response_timeout = 20;
        req->param.local_cm_response_timeout  = 20;
        req->param.retry_count                = target->tl_retry_count;
        req->param.rnr_retry_count            = 7;
        req->param.max_cm_retries             = 15;

        req->priv.opcode        = SRP_LOGIN_REQ;
        req->priv.tag           = 0;
        req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        req->priv.req_flags     = (multich ? SRP_MULTICHAN_MULTI :
                                   SRP_MULTICHAN_SINGLE);
        /*
         * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID.  Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.
         *
         * Targets conforming to these obsolete drafts can be
         * recognized by the I/O Class they report.
         */
        if (target->io_class == SRP_REV10_IB_IO_CLASS) {
                memcpy(req->priv.initiator_port_id,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->initiator_ext, 8);
                memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
                memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
        } else {
                memcpy(req->priv.initiator_port_id,
                       &target->initiator_ext, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.target_port_id,     &target->id_ext, 8);
                memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
        }
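
        /*
         * Byte-layout summary of the 16-byte port identifiers built above
         * (current rev. 16a format vs. the obsolete SRP_REV10_IB_IO_CLASS
         * format):
         *
         *	rev. 16a: initiator = initiator_ext | sgid.interface_id
         *	          target    = id_ext        | ioc_guid
         *	rev. 10:  initiator = sgid.interface_id | initiator_ext
         *	          target    = ioc_guid          | id_ext
         */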

        /*
         * Topspin/Cisco SRP targets will reject our login unless we
         * zero out the first 8 bytes of our initiator port ID and set
         * the second 8 bytes to the local node GUID.
         */
        if (srp_target_is_topspin(target)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
                             be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
        }

        status = ib_send_cm_req(ch->cm_id, &req->param);

        kfree(req);

        return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->state != SRP_TARGET_REMOVED) {
                target->state = SRP_TARGET_REMOVED;
                changed = true;
        }
        spin_unlock_irq(&target->lock);

        if (changed)
                queue_work(srp_remove_wq, &target->remove_work);

        return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        /* XXX should send SRP_I_LOGOUT request */

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                ch->connected = false;
                if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
                        shost_printk(KERN_DEBUG, target->scsi_host,
                                     PFX "Sending CM DREQ failed\n");
                }
        }
}

static void srp_free_req_data(struct srp_target_port *target,
                              struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        struct srp_request *req;
        int i;

        if (!ch->req_ring)
                return;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                if (dev->use_fast_reg) {
                        kfree(req->fr_list);
                } else {
                        kfree(req->fmr_list);
                        kfree(req->map_page);
                }
                if (req->indirect_dma_addr) {
                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                            target->indirect_size,
                                            DMA_TO_DEVICE);
                }
                kfree(req->indirect_desc);
        }

        kfree(ch->req_ring);
        ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *srp_dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = srp_dev->dev;
        struct srp_request *req;
        void *mr_list;
        dma_addr_t dma_addr;
        int i, ret = -ENOMEM;

        ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
                               GFP_KERNEL);
        if (!ch->req_ring)
                goto out;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
                                  GFP_KERNEL);
                if (!mr_list)
                        goto out;
                if (srp_dev->use_fast_reg) {
                        req->fr_list = mr_list;
                } else {
                        req->fmr_list = mr_list;
                        req->map_page = kmalloc(srp_dev->max_pages_per_mr *
                                                sizeof(void *), GFP_KERNEL);
                        if (!req->map_page)
                                goto out;
                }
                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                if (!req->indirect_desc)
                        goto out;

                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
                                             target->indirect_size,
                                             DMA_TO_DEVICE);
                if (ib_dma_mapping_error(ibdev, dma_addr))
                        goto out;

                req->indirect_dma_addr = dma_addr;
        }
        ret = 0;

out:
        return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: any attributes defined in the host template that did not exist before
 * this function was invoked are ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
        struct device_attribute **attr;

        for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
                device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_del_scsi_host_attr(target->scsi_host);
        srp_rport_get(target->rport);
        srp_remove_host(target->scsi_host);
        scsi_remove_host(target->scsi_host);
        srp_stop_rport_timers(target->rport);
        srp_disconnect_target(target);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_ch_ib(target, ch);
        }
        cancel_work_sync(&target->tl_err_work);
        srp_rport_put(target->rport);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_req_data(target, ch);
        }
        kfree(target->ch);
        target->ch = NULL;

        spin_lock(&target->srp_host->target_lock);
        list_del(&target->list);
        spin_unlock(&target->srp_host->target_lock);

        scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
        struct srp_target_port *target =
                container_of(work, struct srp_target_port, remove_work);

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;

        srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
        int i, c = 0;

        for (i = 0; i < target->ch_count; i++)
                c += target->ch[i].connected;

        return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        int ret;

        WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

        ret = srp_lookup_path(ch);
        if (ret)
                goto out;

        while (1) {
                init_completion(&ch->done);
                ret = srp_send_req(ch, multich);
                if (ret)
                        goto out;
                ret = wait_for_completion_interruptible(&ch->done);
                if (ret < 0)
                        goto out;

                /*
                 * The CM event handling code will set status to
                 * SRP_PORT_REDIRECT if we get a port redirect REJ
                 * back, or SRP_DLID_REDIRECT if we get a lid/qp
                 * redirect REJ back.
                 */
                ret = ch->status;
                switch (ret) {
                case 0:
                        ch->connected = true;
                        goto out;

                case SRP_PORT_REDIRECT:
                        ret = srp_lookup_path(ch);
                        if (ret)
                                goto out;
                        break;

                case SRP_DLID_REDIRECT:
                        break;

                case SRP_STALE_CONN:
                        shost_printk(KERN_ERR, target->scsi_host, PFX
                                     "giving up on stale connection\n");
                        ret = -ECONNRESET;
                        goto out;

                default:
                        goto out;
                }
        }

out:
        return ret <= 0 ? ret : -ENODEV;
}

static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
        srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
                u32 rkey)
{
        struct ib_send_wr *bad_wr;
        struct ib_send_wr wr = {
                .opcode             = IB_WR_LOCAL_INV,
                .next               = NULL,
                .num_sge            = 0,
                .send_flags         = 0,
                .ex.invalidate_rkey = rkey,
        };

        wr.wr_cqe = &req->reg_cqe;
        req->reg_cqe.done = srp_inv_rkey_err_done;
        return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
                           struct srp_rdma_ch *ch,
                           struct srp_request *req)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        int i, res;

        if (!scsi_sglist(scmnd) ||
            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
             scmnd->sc_data_direction != DMA_FROM_DEVICE))
                return;

        if (dev->use_fast_reg) {
                struct srp_fr_desc **pfr;

                for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
                        res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
                        if (res < 0) {
                                shost_printk(KERN_ERR, target->scsi_host, PFX
                                  "Queueing INV WR for rkey %#x failed (%d)\n",
                                  (*pfr)->mr->rkey, res);
                                queue_work(system_long_wq,
                                           &target->tl_err_work);
                        }
                }
                if (req->nmdesc)
                        srp_fr_pool_put(ch->fr_pool, req->fr_list,
                                        req->nmdesc);
        } else if (dev->use_fmr) {
                struct ib_pool_fmr **pfmr;

                for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
                        ib_fmr_pool_unmap(*pfmr);
        }

        ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
                        scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
                                       struct srp_request *req,
                                       struct scsi_device *sdev,
                                       struct scsi_cmnd *scmnd)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);
        if (req->scmnd &&
            (!sdev || req->scmnd->device == sdev) &&
            (!scmnd || req->scmnd == scmnd)) {
                scmnd = req->scmnd;
                req->scmnd = NULL;
        } else {
                scmnd = NULL;
        }
        spin_unlock_irqrestore(&ch->lock, flags);

        return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
                         struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
        unsigned long flags;

        srp_unmap_data(scmnd, ch, req);

        spin_lock_irqsave(&ch->lock, flags);
        ch->req_lim += req_lim_delta;
        spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
                           struct scsi_device *sdev, int result)
{
        struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

        if (scmnd) {
                srp_free_req(ch, req, scmnd, 0);
                scmnd->result = result;
                scmnd->scsi_done(scmnd);
        }
}

static void srp_terminate_io(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        struct Scsi_Host *shost = target->scsi_host;
        struct scsi_device *sdev;
        int i, j;

        /*
         * Invoking srp_terminate_io() while srp_queuecommand() is running
         * is not safe. Hence the warning statement below.
         */
        shost_for_each_device(sdev, shost)
                WARN_ON_ONCE(sdev->request_queue->request_fn_active);

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];

                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL,
                                       DID_TRANSPORT_FAILFAST << 16);
                }
        }
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to ensure this is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls to this one via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        int i, j, ret = 0;
        bool multich = false;

        srp_disconnect_target(target);

        if (target->state == SRP_TARGET_SCANNING)
                return -ENODEV;

        /*
         * Now get a new local CM ID so that we avoid confusing the target in
         * case things are really fouled up. Doing so also ensures that all CM
         * callbacks will have finished before a new QP is allocated.
         */
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                ret += srp_new_cm_id(ch);
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL, DID_RESET << 16);
                }
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                /*
                 * Whether or not creating a new CM ID succeeded, create a new
                 * QP. This guarantees that all completion callback function
                 * invocations have finished before request resetting starts.
                 */
                ret += srp_create_ch_ib(ch);

                INIT_LIST_HEAD(&ch->free_tx);
                for (j = 0; j < target->queue_size; ++j)
                        list_add(&ch->tx_ring[j]->list, &ch->free_tx);
        }

        target->qp_in_error = false;

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (ret)
                        break;
                ret = srp_connect_ch(ch, multich);
                multich = true;
        }

        if (ret == 0)
                shost_printk(KERN_INFO, target->scsi_host,
                             PFX "reconnect succeeded\n");

        return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
                         unsigned int dma_len, u32 rkey)
{
        struct srp_direct_buf *desc = state->desc;

        WARN_ON_ONCE(!dma_len);

        desc->va = cpu_to_be64(dma_addr);
        desc->key = cpu_to_be32(rkey);
        desc->len = cpu_to_be32(dma_len);

        state->total_len += dma_len;
        state->desc++;
        state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
                              struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_pool_fmr *fmr;
        u64 io_addr = 0;

        if (state->fmr.next >= state->fmr.end)
                return -ENOMEM;

        WARN_ON_ONCE(!dev->use_fmr);

        if (state->npages == 0)
                return 0;

        if (state->npages == 1 && target->global_mr) {
                srp_map_desc(state, state->base_dma_addr, state->dma_len,
                             target->global_mr->rkey);
                goto reset_state;
        }

        fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
                                   state->npages, io_addr);
        if (IS_ERR(fmr))
                return PTR_ERR(fmr);

        *state->fmr.next++ = fmr;
        state->nmdesc++;

        srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
                     state->dma_len, fmr->fmr->rkey);

reset_state:
        state->npages = 0;
        state->dma_len = 0;

        return 0;
}

static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
        srp_handle_qp_err(cq, wc, "FAST REG");
}

/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * where to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
                             struct srp_request *req,
                             struct srp_rdma_ch *ch, int sg_nents,
                             unsigned int *sg_offset_p)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_send_wr *bad_wr;
        struct ib_reg_wr wr;
        struct srp_fr_desc *desc;
        u32 rkey;
        int n, err;

        if (state->fr.next >= state->fr.end)
                return -ENOMEM;

        WARN_ON_ONCE(!dev->use_fast_reg);

        if (sg_nents == 1 && target->global_mr) {
                unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

                srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
                             sg_dma_len(state->sg) - sg_offset,
                             target->global_mr->rkey);
                if (sg_offset_p)
                        *sg_offset_p = 0;
                return 1;
        }

        desc = srp_fr_pool_get(ch->fr_pool);
        if (!desc)
                return -ENOMEM;

        rkey = ib_inc_rkey(desc->mr->rkey);
        ib_update_fast_reg_key(desc->mr, rkey);

        n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
                         dev->mr_page_size);
        if (unlikely(n < 0)) {
                srp_fr_pool_put(ch->fr_pool, &desc, 1);
                pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
                         dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
                         sg_offset_p ? *sg_offset_p : -1, n);
                return n;
        }

        WARN_ON_ONCE(desc->mr->length == 0);

        req->reg_cqe.done = srp_reg_mr_err_done;

        wr.wr.next = NULL;
        wr.wr.opcode = IB_WR_REG_MR;
        wr.wr.wr_cqe = &req->reg_cqe;
        wr.wr.num_sge = 0;
        wr.wr.send_flags = 0;
        wr.mr = desc->mr;
        wr.key = desc->mr->rkey;
        wr.access = (IB_ACCESS_LOCAL_WRITE |
                     IB_ACCESS_REMOTE_READ |
                     IB_ACCESS_REMOTE_WRITE);

        *state->fr.next++ = desc;
        state->nmdesc++;

        srp_map_desc(state, desc->mr->iova,
                     desc->mr->length, desc->mr->rkey);

        err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
        if (unlikely(err)) {
                WARN_ON_ONCE(err == -ENOMEM);
                return err;
        }

        return n;
}

static int srp_map_sg_entry(struct srp_map_state *state,
                            struct srp_rdma_ch *ch,
                            struct scatterlist *sg, int sg_index)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
        unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
        unsigned int len = 0;
        int ret;

        WARN_ON_ONCE(!dma_len);

        while (dma_len) {
                unsigned offset = dma_addr & ~dev->mr_page_mask;
                if (state->npages == dev->max_pages_per_mr || offset != 0) {
                        ret = srp_map_finish_fmr(state, ch);
                        if (ret)
                                return ret;
                }

                len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

                if (!state->npages)
                        state->base_dma_addr = dma_addr;
                state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
                state->dma_len += len;
                dma_addr += len;
                dma_len -= len;
        }

        /*
         * If the last entry of the MR wasn't a full page, then we need to
         * close it out and start a new one -- we can only merge at page
         * boundaries.
         */
        ret = 0;
        if (len != dev->mr_page_size)
                ret = srp_map_finish_fmr(state, ch);
        return ret;
}
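
/*
 * Worked example (hypothetical numbers, assuming a 4 KiB mr_page_size): a
 * 10 KiB S/G entry starting on a page boundary contributes pages at 0 KiB,
 * 4 KiB and 8 KiB. The final 2 KiB fragment ends mid-page, so the mapping
 * is closed out above and the next S/G entry starts a new one.
 */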
1429
1430 static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1431                           struct srp_request *req, struct scatterlist *scat,
1432                           int count)
1433 {
1434         struct scatterlist *sg;
1435         int i, ret;
1436
1437         state->pages = req->map_page;
1438         state->fmr.next = req->fmr_list;
1439         state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
1440
1441         for_each_sg(scat, sg, count, i) {
1442                 ret = srp_map_sg_entry(state, ch, sg, i);
1443                 if (ret)
1444                         return ret;
1445         }
1446
1447         ret = srp_map_finish_fmr(state, ch);
1448         if (ret)
1449                 return ret;
1450
1451         return 0;
1452 }
1453
1454 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1455                          struct srp_request *req, struct scatterlist *scat,
1456                          int count)
1457 {
1458         unsigned int sg_offset = 0;
1459
1460         state->desc = req->indirect_desc;
1461         state->fr.next = req->fr_list;
1462         state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1463         state->sg = scat;
1464
1465         if (count == 0)
1466                 return 0;
1467
1468         while (count) {
1469                 int i, n;
1470
1471                 n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1472                 if (unlikely(n < 0))
1473                         return n;
1474
1475                 count -= n;
1476                 for (i = 0; i < n; i++)
1477                         state->sg = sg_next(state->sg);
1478         }
1479
1480         return 0;
1481 }
1482
1483 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1484                           struct srp_request *req, struct scatterlist *scat,
1485                           int count)
1486 {
1487         struct srp_target_port *target = ch->target;
1488         struct srp_device *dev = target->srp_host->srp_dev;
1489         struct scatterlist *sg;
1490         int i;
1491
1492         state->desc = req->indirect_desc;
1493         for_each_sg(scat, sg, count, i) {
1494                 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1495                              ib_sg_dma_len(dev->dev, sg),
1496                              target->global_mr->rkey);
1497         }
1498
1499         return 0;
1500 }
1501
1502 /*
1503  * Register the indirect data buffer descriptor with the HCA.
1504  *
1505  * Note: since the indirect data buffer descriptor has been allocated with
1506  * kmalloc(), it is guaranteed to be a physically contiguous memory
1507  * buffer.
1508  */
1509 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1510                        void **next_mr, void **end_mr, u32 idb_len,
1511                        __be32 *idb_rkey)
1512 {
1513         struct srp_target_port *target = ch->target;
1514         struct srp_device *dev = target->srp_host->srp_dev;
1515         struct srp_map_state state;
1516         struct srp_direct_buf idb_desc;
1517         u64 idb_pages[1];
1518         struct scatterlist idb_sg[1];
1519         int ret;
1520
1521         memset(&state, 0, sizeof(state));
1522         memset(&idb_desc, 0, sizeof(idb_desc));
1523         state.gen.next = next_mr;
1524         state.gen.end = end_mr;
1525         state.desc = &idb_desc;
1526         state.base_dma_addr = req->indirect_dma_addr;
1527         state.dma_len = idb_len;
1528
1529         if (dev->use_fast_reg) {
1530                 state.sg = idb_sg;
1531                 sg_init_one(idb_sg, req->indirect_desc, idb_len);
1532                 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1533 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1534                 idb_sg->dma_length = idb_sg->length;          /* hack^2 */
1535 #endif
1536                 ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1537                 if (ret < 0)
1538                         return ret;
1539                 WARN_ON_ONCE(ret < 1);
1540         } else if (dev->use_fmr) {
1541                 state.pages = idb_pages;
1542                 state.pages[0] = (req->indirect_dma_addr &
1543                                   dev->mr_page_mask);
1544                 state.npages = 1;
1545                 ret = srp_map_finish_fmr(&state, ch);
1546                 if (ret < 0)
1547                         return ret;
1548         } else {
1549                 return -EINVAL;
1550         }
1551
1552         *idb_rkey = idb_desc.key;
1553
1554         return 0;
1555 }
1556
1557 #ifdef CONFIG_DYNAMIC_DEBUG
1558 static void srp_check_mapping(struct srp_map_state *state,
1559                               struct srp_rdma_ch *ch, struct srp_request *req,
1560                               struct scatterlist *scat, int count)
1561 {
1562         struct srp_device *dev = ch->target->srp_host->srp_dev;
1563         struct srp_fr_desc **pfr;
1564         u64 desc_len = 0, mr_len = 0;
1565         int i;
1566
1567         for (i = 0; i < state->ndesc; i++)
1568                 desc_len += be32_to_cpu(req->indirect_desc[i].len);
1569         if (dev->use_fast_reg)
1570                 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1571                         mr_len += (*pfr)->mr->length;
1572         else if (dev->use_fmr)
1573                 for (i = 0; i < state->nmdesc; i++)
1574                         mr_len += be32_to_cpu(req->indirect_desc[i].len);
1575         if (desc_len != scsi_bufflen(req->scmnd) ||
1576             mr_len > scsi_bufflen(req->scmnd))
1577                 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1578                        scsi_bufflen(req->scmnd), desc_len, mr_len,
1579                        state->ndesc, state->nmdesc);
1580 }
1581 #endif
1582
1583 /**
1584  * srp_map_data() - map SCSI data buffer onto an SRP request
1585  * @scmnd: SCSI command to map
1586  * @ch: SRP RDMA channel
1587  * @req: SRP request
1588  *
1589  * Returns the length in bytes of the SRP_CMD IU or a negative value if
1590  * mapping failed.
1591  */
1592 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1593                         struct srp_request *req)
1594 {
1595         struct srp_target_port *target = ch->target;
1596         struct scatterlist *scat;
1597         struct srp_cmd *cmd = req->cmd->buf;
1598         int len, nents, count, ret;
1599         struct srp_device *dev;
1600         struct ib_device *ibdev;
1601         struct srp_map_state state;
1602         struct srp_indirect_buf *indirect_hdr;
1603         u32 idb_len, table_len;
1604         __be32 idb_rkey;
1605         u8 fmt;
1606
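        /*
         * Summary of the descriptor formats chosen below: with a single
         * mapped sg entry and a global MR, a lone srp_direct_buf suffices
         * (DIRECT format); otherwise an srp_indirect_buf header plus up to
         * cmd_sg_cnt srp_direct_buf entries is built, and the descriptor
         * table itself is registered via srp_map_idb() when no global MR
         * is available (INDIRECT format).
         */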
1607         if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1608                 return sizeof (struct srp_cmd);
1609
1610         if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1611             scmnd->sc_data_direction != DMA_TO_DEVICE) {
1612                 shost_printk(KERN_WARNING, target->scsi_host,
1613                              PFX "Unhandled data direction %d\n",
1614                              scmnd->sc_data_direction);
1615                 return -EINVAL;
1616         }
1617
1618         nents = scsi_sg_count(scmnd);
1619         scat  = scsi_sglist(scmnd);
1620
1621         dev = target->srp_host->srp_dev;
1622         ibdev = dev->dev;
1623
1624         count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1625         if (unlikely(count == 0))
1626                 return -EIO;
1627
1628         fmt = SRP_DATA_DESC_DIRECT;
1629         len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1630
1631         if (count == 1 && target->global_mr) {
1632                 /*
1633                  * The midlayer only generated a single gather/scatter
1634                  * entry, or DMA mapping coalesced everything to a
1635                  * single entry.  So a direct descriptor along with
1636                  * the DMA MR suffices.
1637                  */
1638                 struct srp_direct_buf *buf = (void *) cmd->add_data;
1639
1640                 buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1641                 buf->key = cpu_to_be32(target->global_mr->rkey);
1642                 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1643
1644                 req->nmdesc = 0;
1645                 goto map_complete;
1646         }
1647
1648         /*
1649          * We have more than one scatter/gather entry, so build our indirect
1650          * descriptor table, trying to merge as many entries as we can.
1651          */
1652         indirect_hdr = (void *) cmd->add_data;
1653
1654         ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1655                                    target->indirect_size, DMA_TO_DEVICE);
1656
1657         memset(&state, 0, sizeof(state));
1658         if (dev->use_fast_reg)
1659                 ret = srp_map_sg_fr(&state, ch, req, scat, count);
1660         else if (dev->use_fmr)
1661                 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
1662         else
1663                 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1664         req->nmdesc = state.nmdesc;
1665         if (ret < 0)
1666                 goto unmap;
1667
1668 #ifdef CONFIG_DYNAMIC_DEBUG
1669         {
1670                 DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1671                         "Memory mapping consistency check");
1672                 if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT))
1673                         srp_check_mapping(&state, ch, req, scat, count);
1674         }
1675 #endif
1676
1677         /* We've mapped the request, now pull as much of the indirect
1678          * descriptor table as we can into the command buffer. If this
1679          * target is not using an external indirect table, we are
1680          * guaranteed to fit into the command, as the SCSI layer won't
1681          * give us more S/G entries than we allow.
1682          */
1683         if (state.ndesc == 1) {
1684                 /*
1685                  * Memory registration collapsed the sg-list into one entry,
1686                  * so use a direct descriptor.
1687                  */
1688                 struct srp_direct_buf *buf = (void *) cmd->add_data;
1689
1690                 *buf = req->indirect_desc[0];
1691                 goto map_complete;
1692         }
1693
1694         if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1695                                                 !target->allow_ext_sg)) {
1696                 shost_printk(KERN_ERR, target->scsi_host,
1697                              "Could not fit S/G list into SRP_CMD\n");
1698                 ret = -EIO;
1699                 goto unmap;
1700         }
1701
1702         count = min(state.ndesc, target->cmd_sg_cnt);
1703         table_len = state.ndesc * sizeof (struct srp_direct_buf);
1704         idb_len = sizeof(struct srp_indirect_buf) + table_len;
1705
1706         fmt = SRP_DATA_DESC_INDIRECT;
1707         len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1708         len += count * sizeof (struct srp_direct_buf);
1709
1710         memcpy(indirect_hdr->desc_list, req->indirect_desc,
1711                count * sizeof (struct srp_direct_buf));
1712
1713         if (!target->global_mr) {
1714                 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1715                                   idb_len, &idb_rkey);
1716                 if (ret < 0)
1717                         goto unmap;
1718                 req->nmdesc++;
1719         } else {
1720                 idb_rkey = cpu_to_be32(target->global_mr->rkey);
1721         }
1722
1723         indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1724         indirect_hdr->table_desc.key = idb_rkey;
1725         indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1726         indirect_hdr->len = cpu_to_be32(state.total_len);
1727
1728         if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1729                 cmd->data_out_desc_cnt = count;
1730         else
1731                 cmd->data_in_desc_cnt = count;
1732
1733         ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1734                                       DMA_TO_DEVICE);
1735
1736 map_complete:
1737         if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1738                 cmd->buf_fmt = fmt << 4;
1739         else
1740                 cmd->buf_fmt = fmt;
1741
1742         return len;
1743
1744 unmap:
1745         srp_unmap_data(scmnd, ch, req);
1746         if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1747                 ret = -E2BIG;
1748         return ret;
1749 }
1750
1751 /*
1752  * Return an IU and possible credit to the free pool
1753  */
1754 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1755                           enum srp_iu_type iu_type)
1756 {
1757         unsigned long flags;
1758
1759         spin_lock_irqsave(&ch->lock, flags);
1760         list_add(&iu->list, &ch->free_tx);
1761         if (iu_type != SRP_IU_RSP)
1762                 ++ch->req_lim;
1763         spin_unlock_irqrestore(&ch->lock, flags);
1764 }
1765
1766 /*
1767  * Must be called with ch->lock held to protect req_lim and free_tx.
1768  * If IU is not sent, it must be returned using srp_put_tx_iu().
1769  *
1770  * Note:
1771  * An upper limit for the number of allocated information units for each
1772  * request type is:
1773  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1774  *   more than Scsi_Host.can_queue requests.
1775  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1776  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1777  *   one unanswered SRP request to an initiator.
1778  */
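/*
 * Summing the above bounds: no more than SRP_CMD_SQ_SIZE +
 * SRP_TSK_MGMT_SQ_SIZE + 1 information units can be in use per channel
 * at any time.
 */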
1779 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1780                                       enum srp_iu_type iu_type)
1781 {
1782         struct srp_target_port *target = ch->target;
1783         s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1784         struct srp_iu *iu;
1785
1786         ib_process_cq_direct(ch->send_cq, -1);
1787
1788         if (list_empty(&ch->free_tx))
1789                 return NULL;
1790
1791         /* Initiator responses to target requests do not consume credits */
1792         if (iu_type != SRP_IU_RSP) {
1793                 if (ch->req_lim <= rsv) {
1794                         ++target->zero_req_lim;
1795                         return NULL;
1796                 }
1797
1798                 --ch->req_lim;
1799         }
1800
1801         iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1802         list_del(&iu->list);
1803         return iu;
1804 }
1805
1806 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1807 {
1808         struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1809         struct srp_rdma_ch *ch = cq->cq_context;
1810
1811         if (unlikely(wc->status != IB_WC_SUCCESS)) {
1812                 srp_handle_qp_err(cq, wc, "SEND");
1813                 return;
1814         }
1815
1816         list_add(&iu->list, &ch->free_tx);
1817 }
1818
1819 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1820 {
1821         struct srp_target_port *target = ch->target;
1822         struct ib_sge list;
1823         struct ib_send_wr wr, *bad_wr;
1824
1825         list.addr   = iu->dma;
1826         list.length = len;
1827         list.lkey   = target->lkey;
1828
1829         iu->cqe.done = srp_send_done;
1830
1831         wr.next       = NULL;
1832         wr.wr_cqe     = &iu->cqe;
1833         wr.sg_list    = &list;
1834         wr.num_sge    = 1;
1835         wr.opcode     = IB_WR_SEND;
1836         wr.send_flags = IB_SEND_SIGNALED;
1837
1838         return ib_post_send(ch->qp, &wr, &bad_wr);
1839 }
1840
1841 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1842 {
1843         struct srp_target_port *target = ch->target;
1844         struct ib_recv_wr wr, *bad_wr;
1845         struct ib_sge list;
1846
1847         list.addr   = iu->dma;
1848         list.length = iu->size;
1849         list.lkey   = target->lkey;
1850
1851         iu->cqe.done = srp_recv_done;
1852
1853         wr.next     = NULL;
1854         wr.wr_cqe   = &iu->cqe;
1855         wr.sg_list  = &list;
1856         wr.num_sge  = 1;
1857
1858         return ib_post_recv(ch->qp, &wr, &bad_wr);
1859 }
1860
1861 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1862 {
1863         struct srp_target_port *target = ch->target;
1864         struct srp_request *req;
1865         struct scsi_cmnd *scmnd;
1866         unsigned long flags;
1867
1868         if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1869                 spin_lock_irqsave(&ch->lock, flags);
1870                 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1871                 spin_unlock_irqrestore(&ch->lock, flags);
1872
1873                 ch->tsk_mgmt_status = -1;
1874                 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1875                         ch->tsk_mgmt_status = rsp->data[3];
1876                 complete(&ch->tsk_mgmt_done);
1877         } else {
1878                 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1879                 if (scmnd) {
1880                         req = (void *)scmnd->host_scribble;
1881                         scmnd = srp_claim_req(ch, req, NULL, scmnd);
1882                 }
1883                 if (!scmnd) {
1884                         shost_printk(KERN_ERR, target->scsi_host,
1885                                      "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1886                                      rsp->tag, ch - target->ch, ch->qp->qp_num);
1887
1888                         spin_lock_irqsave(&ch->lock, flags);
1889                         ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1890                         spin_unlock_irqrestore(&ch->lock, flags);
1891
1892                         return;
1893                 }
1894                 scmnd->result = rsp->status;
1895
1896                 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1897                         memcpy(scmnd->sense_buffer, rsp->data +
1898                                be32_to_cpu(rsp->resp_data_len),
1899                                min_t(int, be32_to_cpu(rsp->sense_data_len),
1900                                      SCSI_SENSE_BUFFERSIZE));
1901                 }
1902
1903                 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1904                         scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1905                 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1906                         scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1907                 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1908                         scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1909                 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1910                         scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1911
1912                 srp_free_req(ch, req, scmnd,
1913                              be32_to_cpu(rsp->req_lim_delta));
1914
1915                 scmnd->host_scribble = NULL;
1916                 scmnd->scsi_done(scmnd);
1917         }
1918 }
1919
1920 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1921                                void *rsp, int len)
1922 {
1923         struct srp_target_port *target = ch->target;
1924         struct ib_device *dev = target->srp_host->srp_dev->dev;
1925         unsigned long flags;
1926         struct srp_iu *iu;
1927         int err;
1928
1929         spin_lock_irqsave(&ch->lock, flags);
1930         ch->req_lim += req_delta;
1931         iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1932         spin_unlock_irqrestore(&ch->lock, flags);
1933
1934         if (!iu) {
1935                 shost_printk(KERN_ERR, target->scsi_host, PFX
1936                              "no IU available to send response\n");
1937                 return 1;
1938         }
1939
1940         ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1941         memcpy(iu->buf, rsp, len);
1942         ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1943
1944         err = srp_post_send(ch, iu, len);
1945         if (err) {
1946                 shost_printk(KERN_ERR, target->scsi_host, PFX
1947                              "unable to post response: %d\n", err);
1948                 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1949         }
1950
1951         return err;
1952 }
1953
1954 static void srp_process_cred_req(struct srp_rdma_ch *ch,
1955                                  struct srp_cred_req *req)
1956 {
1957         struct srp_cred_rsp rsp = {
1958                 .opcode = SRP_CRED_RSP,
1959                 .tag = req->tag,
1960         };
1961         s32 delta = be32_to_cpu(req->req_lim_delta);
1962
1963         if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1964                 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1965                              "problems processing SRP_CRED_REQ\n");
1966 }
1967
1968 static void srp_process_aer_req(struct srp_rdma_ch *ch,
1969                                 struct srp_aer_req *req)
1970 {
1971         struct srp_target_port *target = ch->target;
1972         struct srp_aer_rsp rsp = {
1973                 .opcode = SRP_AER_RSP,
1974                 .tag = req->tag,
1975         };
1976         s32 delta = be32_to_cpu(req->req_lim_delta);
1977
1978         shost_printk(KERN_ERR, target->scsi_host, PFX
1979                      "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1980
1981         if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1982                 shost_printk(KERN_ERR, target->scsi_host, PFX
1983                              "problems processing SRP_AER_REQ\n");
1984 }
1985
1986 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1987 {
1988         struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1989         struct srp_rdma_ch *ch = cq->cq_context;
1990         struct srp_target_port *target = ch->target;
1991         struct ib_device *dev = target->srp_host->srp_dev->dev;
1992         int res;
1993         u8 opcode;
1994
1995         if (unlikely(wc->status != IB_WC_SUCCESS)) {
1996                 srp_handle_qp_err(cq, wc, "RECV");
1997                 return;
1998         }
1999
2000         ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2001                                    DMA_FROM_DEVICE);
2002
2003         opcode = *(u8 *) iu->buf;
2004
2005         if (0) { /* change to (1) to log and hex-dump each received IU */
2006                 shost_printk(KERN_ERR, target->scsi_host,
2007                              PFX "recv completion, opcode 0x%02x\n", opcode);
2008                 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2009                                iu->buf, wc->byte_len, true);
2010         }
2011
2012         switch (opcode) {
2013         case SRP_RSP:
2014                 srp_process_rsp(ch, iu->buf);
2015                 break;
2016
2017         case SRP_CRED_REQ:
2018                 srp_process_cred_req(ch, iu->buf);
2019                 break;
2020
2021         case SRP_AER_REQ:
2022                 srp_process_aer_req(ch, iu->buf);
2023                 break;
2024
2025         case SRP_T_LOGOUT:
2026                 /* XXX Handle target logout */
2027                 shost_printk(KERN_WARNING, target->scsi_host,
2028                              PFX "Got target logout request\n");
2029                 break;
2030
2031         default:
2032                 shost_printk(KERN_WARNING, target->scsi_host,
2033                              PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2034                 break;
2035         }
2036
2037         ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2038                                       DMA_FROM_DEVICE);
2039
2040         res = srp_post_recv(ch, iu);
2041         if (res != 0)
2042                 shost_printk(KERN_ERR, target->scsi_host,
2043                              PFX "Recv failed with error code %d\n", res);
2044 }
2045
2046 /**
2047  * srp_tl_err_work() - handle a transport layer error
2048  * @work: Work structure embedded in an SRP target port.
2049  *
2050  * Note: This function may get invoked before the rport has been created,
2051  * hence the target->rport test.
2052  */
2053 static void srp_tl_err_work(struct work_struct *work)
2054 {
2055         struct srp_target_port *target;
2056
2057         target = container_of(work, struct srp_target_port, tl_err_work);
2058         if (target->rport)
2059                 srp_start_tl_fail_timers(target->rport);
2060 }
2061
2062 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2063                 const char *opname)
2064 {
2065         struct srp_rdma_ch *ch = cq->cq_context;
2066         struct srp_target_port *target = ch->target;
2067
2068         if (ch->connected && !target->qp_in_error) {
2069                 shost_printk(KERN_ERR, target->scsi_host,
2070                              PFX "failed %s status %s (%d) for CQE %p\n",
2071                              opname, ib_wc_status_msg(wc->status), wc->status,
2072                              wc->wr_cqe);
2073                 queue_work(system_long_wq, &target->tl_err_work);
2074         }
2075         target->qp_in_error = true;
2076 }
2077
2078 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2079 {
2080         struct srp_target_port *target = host_to_target(shost);
2081         struct srp_rport *rport = target->rport;
2082         struct srp_rdma_ch *ch;
2083         struct srp_request *req;
2084         struct srp_iu *iu;
2085         struct srp_cmd *cmd;
2086         struct ib_device *dev;
2087         unsigned long flags;
2088         u32 tag;
2089         u16 idx;
2090         int len, ret;
2091         const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2092
2093         /*
2094          * The SCSI EH thread is the only context from which srp_queuecommand()
2095          * can get invoked for blocked devices (SDEV_BLOCK /
2096          * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2097          * locking the rport mutex if invoked from inside the SCSI EH.
2098          */
2099         if (in_scsi_eh)
2100                 mutex_lock(&rport->mutex);
2101
2102         scmnd->result = srp_chkready(target->rport);
2103         if (unlikely(scmnd->result))
2104                 goto err;
2105
2106         WARN_ON_ONCE(scmnd->request->tag < 0);
2107         tag = blk_mq_unique_tag(scmnd->request);
2108         ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2109         idx = blk_mq_unique_tag_to_tag(tag);
2110         WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2111                   dev_name(&shost->shost_gendev), tag, idx,
2112                   target->req_ring_size);
2113
2114         spin_lock_irqsave(&ch->lock, flags);
2115         iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2116         spin_unlock_irqrestore(&ch->lock, flags);
2117
2118         if (!iu)
2119                 goto err;
2120
2121         req = &ch->req_ring[idx];
2122         dev = target->srp_host->srp_dev->dev;
2123         ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2124                                    DMA_TO_DEVICE);
2125
2126         scmnd->host_scribble = (void *) req;
2127
2128         cmd = iu->buf;
2129         memset(cmd, 0, sizeof *cmd);
2130
2131         cmd->opcode = SRP_CMD;
2132         int_to_scsilun(scmnd->device->lun, &cmd->lun);
2133         cmd->tag    = tag;
2134         memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2135
2136         req->scmnd    = scmnd;
2137         req->cmd      = iu;
2138
2139         len = srp_map_data(scmnd, ch, req);
2140         if (len < 0) {
2141                 shost_printk(KERN_ERR, target->scsi_host,
2142                              PFX "Failed to map data (%d)\n", len);
2143                 /*
2144                  * If we ran out of memory descriptors (-ENOMEM) because an
2145                  * application is queuing many requests with more than
2146                  * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2147                  * to reduce queue depth temporarily.
2148                  */
2149                 scmnd->result = len == -ENOMEM ?
2150                         DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2151                 goto err_iu;
2152         }
2153
2154         ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2155                                       DMA_TO_DEVICE);
2156
2157         if (srp_post_send(ch, iu, len)) {
2158                 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2159                 goto err_unmap;
2160         }
2161
2162         ret = 0;
2163
2164 unlock_rport:
2165         if (in_scsi_eh)
2166                 mutex_unlock(&rport->mutex);
2167
2168         return ret;
2169
2170 err_unmap:
2171         srp_unmap_data(scmnd, ch, req);
2172
2173 err_iu:
2174         srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2175
2176         /*
2177          * Prevent the loops that iterate over the request ring from
2178          * encountering a dangling SCSI command pointer.
2179          */
2180         req->scmnd = NULL;
2181
2182 err:
2183         if (scmnd->result) {
2184                 scmnd->scsi_done(scmnd);
2185                 ret = 0;
2186         } else {
2187                 ret = SCSI_MLQUEUE_HOST_BUSY;
2188         }
2189
2190         goto unlock_rport;
2191 }
2192
2193 /*
2194  * Note: the resources allocated in this function are freed in
2195  * srp_free_ch_ib().
2196  */
2197 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2198 {
2199         struct srp_target_port *target = ch->target;
2200         int i;
2201
2202         ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2203                               GFP_KERNEL);
2204         if (!ch->rx_ring)
2205                 goto err_no_ring;
2206         ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2207                               GFP_KERNEL);
2208         if (!ch->tx_ring)
2209                 goto err_no_ring;
2210
2211         for (i = 0; i < target->queue_size; ++i) {
2212                 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2213                                               ch->max_ti_iu_len,
2214                                               GFP_KERNEL, DMA_FROM_DEVICE);
2215                 if (!ch->rx_ring[i])
2216                         goto err;
2217         }
2218
2219         for (i = 0; i < target->queue_size; ++i) {
2220                 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2221                                               target->max_iu_len,
2222                                               GFP_KERNEL, DMA_TO_DEVICE);
2223                 if (!ch->tx_ring[i])
2224                         goto err;
2225
2226                 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2227         }
2228
2229         return 0;
2230
2231 err:
2232         for (i = 0; i < target->queue_size; ++i) {
2233                 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2234                 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2235         }
2236
2237
2238 err_no_ring:
2239         kfree(ch->tx_ring);
2240         ch->tx_ring = NULL;
2241         kfree(ch->rx_ring);
2242         ch->rx_ring = NULL;
2243
2244         return -ENOMEM;
2245 }
2246
2247 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2248 {
2249         uint64_t T_tr_ns, max_compl_time_ms;
2250         uint32_t rq_tmo_jiffies;
2251
2252         /*
2253          * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2254          * table 91), both the QP timeout and the retry count have to be set
2255          * for RC QP's during the RTR to RTS transition.
2256          */
2257         WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2258                      (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2259
2260         /*
2261          * Set target->rq_tmo_jiffies to one second more than the largest time
2262          * it can take before an error completion is generated. See also
2263          * C9-140..142 in the IBTA spec for more information about how to
2264          * convert the QP Local ACK Timeout value to nanoseconds.
2265          */
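        /*
         * Worked example (illustrative values, not driver defaults): with
         * qp_attr->timeout == 19 and qp_attr->retry_cnt == 7,
         * T_tr = 4096 ns * 2^19 ~= 2.15 s, the worst-case completion delay
         * is 7 * 4 * 2.15 s ~= 60 s, and rq_tmo_jiffies corresponds to
         * roughly 61 seconds.
         */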
2266         T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2267         max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2268         do_div(max_compl_time_ms, NSEC_PER_MSEC);
2269         rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2270
2271         return rq_tmo_jiffies;
2272 }
2273
2274 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2275                                const struct srp_login_rsp *lrsp,
2276                                struct srp_rdma_ch *ch)
2277 {
2278         struct srp_target_port *target = ch->target;
2279         struct ib_qp_attr *qp_attr = NULL;
2280         int attr_mask = 0;
2281         int ret;
2282         int i;
2283
2284         if (lrsp->opcode == SRP_LOGIN_RSP) {
2285                 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2286                 ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2287
2288                 /*
2289                  * Reserve credits for task management so we don't
2290                  * bounce requests back to the SCSI mid-layer.
2291                  */
2292                 target->scsi_host->can_queue
2293                         = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2294                               target->scsi_host->can_queue);
2295                 target->scsi_host->cmd_per_lun
2296                         = min_t(int, target->scsi_host->can_queue,
2297                                 target->scsi_host->cmd_per_lun);
2298         } else {
2299                 shost_printk(KERN_WARNING, target->scsi_host,
2300                              PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2301                 ret = -ECONNRESET;
2302                 goto error;
2303         }
2304
2305         if (!ch->rx_ring) {
2306                 ret = srp_alloc_iu_bufs(ch);
2307                 if (ret)
2308                         goto error;
2309         }
2310
2311         ret = -ENOMEM;
2312         qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2313         if (!qp_attr)
2314                 goto error;
2315
2316         qp_attr->qp_state = IB_QPS_RTR;
2317         ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2318         if (ret)
2319                 goto error_free;
2320
2321         ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2322         if (ret)
2323                 goto error_free;
2324
2325         for (i = 0; i < target->queue_size; i++) {
2326                 struct srp_iu *iu = ch->rx_ring[i];
2327
2328                 ret = srp_post_recv(ch, iu);
2329                 if (ret)
2330                         goto error_free;
2331         }
2332
2333         qp_attr->qp_state = IB_QPS_RTS;
2334         ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2335         if (ret)
2336                 goto error_free;
2337
2338         target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2339
2340         ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2341         if (ret)
2342                 goto error_free;
2343
2344         ret = ib_send_cm_rtu(cm_id, NULL, 0);
2345
2346 error_free:
2347         kfree(qp_attr);
2348
2349 error:
2350         ch->status = ret;
2351 }
2352
2353 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2354                                struct ib_cm_event *event,
2355                                struct srp_rdma_ch *ch)
2356 {
2357         struct srp_target_port *target = ch->target;
2358         struct Scsi_Host *shost = target->scsi_host;
2359         struct ib_class_port_info *cpi;
2360         int opcode;
2361
2362         switch (event->param.rej_rcvd.reason) {
2363         case IB_CM_REJ_PORT_CM_REDIRECT:
2364                 cpi = event->param.rej_rcvd.ari;
2365                 ch->path.dlid = cpi->redirect_lid;
2366                 ch->path.pkey = cpi->redirect_pkey;
2367                 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2368                 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2369
2370                 ch->status = ch->path.dlid ?
2371                         SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2372                 break;
2373
2374         case IB_CM_REJ_PORT_REDIRECT:
2375                 if (srp_target_is_topspin(target)) {
2376                         /*
2377                          * Topspin/Cisco SRP gateways incorrectly send
2378                          * reject reason code 25 when they mean 24
2379                          * (port redirect).
2380                          */
2381                         memcpy(ch->path.dgid.raw,
2382                                event->param.rej_rcvd.ari, 16);
2383
2384                         shost_printk(KERN_DEBUG, shost,
2385                                      PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2386                                      be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2387                                      be64_to_cpu(ch->path.dgid.global.interface_id));
2388
2389                         ch->status = SRP_PORT_REDIRECT;
2390                 } else {
2391                         shost_printk(KERN_WARNING, shost,
2392                                      "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2393                         ch->status = -ECONNRESET;
2394                 }
2395                 break;
2396
2397         case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2398                 shost_printk(KERN_WARNING, shost,
2399                             "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2400                 ch->status = -ECONNRESET;
2401                 break;
2402
2403         case IB_CM_REJ_CONSUMER_DEFINED:
2404                 opcode = *(u8 *) event->private_data;
2405                 if (opcode == SRP_LOGIN_REJ) {
2406                         struct srp_login_rej *rej = event->private_data;
2407                         u32 reason = be32_to_cpu(rej->reason);
2408
2409                         if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2410                                 shost_printk(KERN_WARNING, shost,
2411                                              PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2412                         else
2413                                 shost_printk(KERN_WARNING, shost, PFX
2414                                              "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2415                                              target->sgid.raw,
2416                                              target->orig_dgid.raw, reason);
2417                 } else
2418                         shost_printk(KERN_WARNING, shost,
2419                                      "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2420                                      " opcode 0x%02x\n", opcode);
2421                 ch->status = -ECONNRESET;
2422                 break;
2423
2424         case IB_CM_REJ_STALE_CONN:
2425                 shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2426                 ch->status = SRP_STALE_CONN;
2427                 break;
2428
2429         default:
2430                 shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2431                              event->param.rej_rcvd.reason);
2432                 ch->status = -ECONNRESET;
2433         }
2434 }
2435
2436 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2437 {
2438         struct srp_rdma_ch *ch = cm_id->context;
2439         struct srp_target_port *target = ch->target;
2440         int comp = 0;
2441
2442         switch (event->event) {
2443         case IB_CM_REQ_ERROR:
2444                 shost_printk(KERN_DEBUG, target->scsi_host,
2445                              PFX "Sending CM REQ failed\n");
2446                 comp = 1;
2447                 ch->status = -ECONNRESET;
2448                 break;
2449
2450         case IB_CM_REP_RECEIVED:
2451                 comp = 1;
2452                 srp_cm_rep_handler(cm_id, event->private_data, ch);
2453                 break;
2454
2455         case IB_CM_REJ_RECEIVED:
2456                 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2457                 comp = 1;
2458
2459                 srp_cm_rej_handler(cm_id, event, ch);
2460                 break;
2461
2462         case IB_CM_DREQ_RECEIVED:
2463                 shost_printk(KERN_WARNING, target->scsi_host,
2464                              PFX "DREQ received - connection closed\n");
2465                 ch->connected = false;
2466                 if (ib_send_cm_drep(cm_id, NULL, 0))
2467                         shost_printk(KERN_ERR, target->scsi_host,
2468                                      PFX "Sending CM DREP failed\n");
2469                 queue_work(system_long_wq, &target->tl_err_work);
2470                 break;
2471
2472         case IB_CM_TIMEWAIT_EXIT:
2473                 shost_printk(KERN_ERR, target->scsi_host,
2474                              PFX "connection closed\n");
2475                 comp = 1;
2476
2477                 ch->status = 0;
2478                 break;
2479
2480         case IB_CM_MRA_RECEIVED:
2481         case IB_CM_DREQ_ERROR:
2482         case IB_CM_DREP_RECEIVED:
2483                 break;
2484
2485         default:
2486                 shost_printk(KERN_WARNING, target->scsi_host,
2487                              PFX "Unhandled CM event %d\n", event->event);
2488                 break;
2489         }
2490
2491         if (comp)
2492                 complete(&ch->done);
2493
2494         return 0;
2495 }
2496
2497 /**
2498  * srp_change_queue_depth() - set device queue depth
2499  * @sdev: scsi device struct
2500  * @qdepth: requested queue depth
2501  *
2502  * Returns queue depth.
2503  */
2504 static int
2505 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2506 {
2507         if (!sdev->tagged_supported)
2508                 qdepth = 1;
2509         return scsi_change_queue_depth(sdev, qdepth);
2510 }
2511
2512 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2513                              u8 func)
2514 {
2515         struct srp_target_port *target = ch->target;
2516         struct srp_rport *rport = target->rport;
2517         struct ib_device *dev = target->srp_host->srp_dev->dev;
2518         struct srp_iu *iu;
2519         struct srp_tsk_mgmt *tsk_mgmt;
2520
2521         if (!ch->connected || target->qp_in_error)
2522                 return -1;
2523
2524         init_completion(&ch->tsk_mgmt_done);
2525
2526         /*
2527          * Lock the rport mutex to prevent srp_create_ch_ib() from being
2528          * invoked while a task management function is being sent.
2529          */
2530         mutex_lock(&rport->mutex);
2531         spin_lock_irq(&ch->lock);
2532         iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2533         spin_unlock_irq(&ch->lock);
2534
2535         if (!iu) {
2536                 mutex_unlock(&rport->mutex);
2537
2538                 return -1;
2539         }
2540
2541         ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2542                                    DMA_TO_DEVICE);
2543         tsk_mgmt = iu->buf;
2544         memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2545
2546         tsk_mgmt->opcode        = SRP_TSK_MGMT;
2547         int_to_scsilun(lun, &tsk_mgmt->lun);
2548         tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
2549         tsk_mgmt->tsk_mgmt_func = func;
2550         tsk_mgmt->task_tag      = req_tag;
2551
2552         ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2553                                       DMA_TO_DEVICE);
2554         if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2555                 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2556                 mutex_unlock(&rport->mutex);
2557
2558                 return -1;
2559         }
2560         mutex_unlock(&rport->mutex);
2561
2562         if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2563                                          msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2564                 return -1;
2565
2566         return 0;
2567 }
2568
2569 static int srp_abort(struct scsi_cmnd *scmnd)
2570 {
2571         struct srp_target_port *target = host_to_target(scmnd->device->host);
2572         struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2573         u32 tag;
2574         u16 ch_idx;
2575         struct srp_rdma_ch *ch;
2576         int ret;
2577
2578         shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2579
2580         if (!req)
2581                 return SUCCESS;
2582         tag = blk_mq_unique_tag(scmnd->request);
2583         ch_idx = blk_mq_unique_tag_to_hwq(tag);
2584         if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2585                 return SUCCESS;
2586         ch = &target->ch[ch_idx];
2587         if (!srp_claim_req(ch, req, NULL, scmnd))
2588                 return SUCCESS;
2589         shost_printk(KERN_ERR, target->scsi_host,
2590                      "Sending SRP abort for tag %#x\n", tag);
2591         if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2592                               SRP_TSK_ABORT_TASK) == 0)
2593                 ret = SUCCESS;
2594         else if (target->rport->state == SRP_RPORT_LOST)
2595                 ret = FAST_IO_FAIL;
2596         else
2597                 ret = FAILED;
2598         srp_free_req(ch, req, scmnd, 0);
2599         scmnd->result = DID_ABORT << 16;
2600         scmnd->scsi_done(scmnd);
2601
2602         return ret;
2603 }
2604
2605 static int srp_reset_device(struct scsi_cmnd *scmnd)
2606 {
2607         struct srp_target_port *target = host_to_target(scmnd->device->host);
2608         struct srp_rdma_ch *ch;
2609         int i, j;
2610
2611         shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2612
2613         ch = &target->ch[0];
2614         if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2615                               SRP_TSK_LUN_RESET))
2616                 return FAILED;
2617         if (ch->tsk_mgmt_status)
2618                 return FAILED;
2619
2620         for (i = 0; i < target->ch_count; i++) {
2621                 ch = &target->ch[i];
2622                 for (j = 0; j < target->req_ring_size; ++j) {
2623                         struct srp_request *req = &ch->req_ring[j];
2624
2625                         srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2626                 }
2627         }
2628
2629         return SUCCESS;
2630 }
2631
2632 static int srp_reset_host(struct scsi_cmnd *scmnd)
2633 {
2634         struct srp_target_port *target = host_to_target(scmnd->device->host);
2635
2636         shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2637
2638         return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2639 }
2640
2641 static int srp_slave_alloc(struct scsi_device *sdev)
2642 {
2643         struct Scsi_Host *shost = sdev->host;
2644         struct srp_target_port *target = host_to_target(shost);
2645         struct srp_device *srp_dev = target->srp_host->srp_dev;
2646         struct ib_device *ibdev = srp_dev->dev;
2647
2648         if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
2649                 blk_queue_virt_boundary(sdev->request_queue,
2650                                         ~srp_dev->mr_page_mask);
2651
2652         return 0;
2653 }
2654
2655 static int srp_slave_configure(struct scsi_device *sdev)
2656 {
2657         struct Scsi_Host *shost = sdev->host;
2658         struct srp_target_port *target = host_to_target(shost);
2659         struct request_queue *q = sdev->request_queue;
2660         unsigned long timeout;
2661
2662         if (sdev->type == TYPE_DISK) {
2663                 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2664                 blk_queue_rq_timeout(q, timeout);
2665         }
2666
2667         return 0;
2668 }
2669
2670 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2671                            char *buf)
2672 {
2673         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2674
2675         return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2676 }
2677
2678 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2679                              char *buf)
2680 {
2681         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2682
2683         return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2684 }
2685
2686 static ssize_t show_service_id(struct device *dev,
2687                                struct device_attribute *attr, char *buf)
2688 {
2689         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2690
2691         return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2692 }
2693
2694 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2695                          char *buf)
2696 {
2697         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2698
2699         return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2700 }
2701
2702 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2703                          char *buf)
2704 {
2705         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2706
2707         return sprintf(buf, "%pI6\n", target->sgid.raw);
2708 }
2709
2710 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2711                          char *buf)
2712 {
2713         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2714         struct srp_rdma_ch *ch = &target->ch[0];
2715
2716         return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2717 }
2718
2719 static ssize_t show_orig_dgid(struct device *dev,
2720                               struct device_attribute *attr, char *buf)
2721 {
2722         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2723
2724         return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2725 }
2726
2727 static ssize_t show_req_lim(struct device *dev,
2728                             struct device_attribute *attr, char *buf)
2729 {
2730         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2731         struct srp_rdma_ch *ch;
2732         int i, req_lim = INT_MAX;
2733
2734         for (i = 0; i < target->ch_count; i++) {
2735                 ch = &target->ch[i];
2736                 req_lim = min(req_lim, ch->req_lim);
2737         }
2738         return sprintf(buf, "%d\n", req_lim);
2739 }
2740
2741 static ssize_t show_zero_req_lim(struct device *dev,
2742                                  struct device_attribute *attr, char *buf)
2743 {
2744         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2745
2746         return sprintf(buf, "%d\n", target->zero_req_lim);
2747 }
2748
2749 static ssize_t show_local_ib_port(struct device *dev,
2750                                   struct device_attribute *attr, char *buf)
2751 {
2752         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2753
2754         return sprintf(buf, "%d\n", target->srp_host->port);
2755 }
2756
2757 static ssize_t show_local_ib_device(struct device *dev,
2758                                     struct device_attribute *attr, char *buf)
2759 {
2760         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2761
2762         return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2763 }
2764
2765 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2766                              char *buf)
2767 {
2768         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2769
2770         return sprintf(buf, "%d\n", target->ch_count);
2771 }
2772
2773 static ssize_t show_comp_vector(struct device *dev,
2774                                 struct device_attribute *attr, char *buf)
2775 {
2776         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2777
2778         return sprintf(buf, "%d\n", target->comp_vector);
2779 }
2780
2781 static ssize_t show_tl_retry_count(struct device *dev,
2782                                    struct device_attribute *attr, char *buf)
2783 {
2784         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2785
2786         return sprintf(buf, "%d\n", target->tl_retry_count);
2787 }
2788
2789 static ssize_t show_cmd_sg_entries(struct device *dev,
2790                                    struct device_attribute *attr, char *buf)
2791 {
2792         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2793
2794         return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2795 }
2796
2797 static ssize_t show_allow_ext_sg(struct device *dev,
2798                                  struct device_attribute *attr, char *buf)
2799 {
2800         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2801
2802         return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2803 }
2804
2805 static DEVICE_ATTR(id_ext,          S_IRUGO, show_id_ext,          NULL);
2806 static DEVICE_ATTR(ioc_guid,        S_IRUGO, show_ioc_guid,        NULL);
2807 static DEVICE_ATTR(service_id,      S_IRUGO, show_service_id,      NULL);
2808 static DEVICE_ATTR(pkey,            S_IRUGO, show_pkey,            NULL);
2809 static DEVICE_ATTR(sgid,            S_IRUGO, show_sgid,            NULL);
2810 static DEVICE_ATTR(dgid,            S_IRUGO, show_dgid,            NULL);
2811 static DEVICE_ATTR(orig_dgid,       S_IRUGO, show_orig_dgid,       NULL);
2812 static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
2813 static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,    NULL);
2814 static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
2815 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2816 static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
2817 static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
2818 static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
2819 static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2820 static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
2821
2822 static struct device_attribute *srp_host_attrs[] = {
2823         &dev_attr_id_ext,
2824         &dev_attr_ioc_guid,
2825         &dev_attr_service_id,
2826         &dev_attr_pkey,
2827         &dev_attr_sgid,
2828         &dev_attr_dgid,
2829         &dev_attr_orig_dgid,
2830         &dev_attr_req_lim,
2831         &dev_attr_zero_req_lim,
2832         &dev_attr_local_ib_port,
2833         &dev_attr_local_ib_device,
2834         &dev_attr_ch_count,
2835         &dev_attr_comp_vector,
2836         &dev_attr_tl_retry_count,
2837         &dev_attr_cmd_sg_entries,
2838         &dev_attr_allow_ext_sg,
2839         NULL
2840 };
2841
2842 static struct scsi_host_template srp_template = {
2843         .module                         = THIS_MODULE,
2844         .name                           = "InfiniBand SRP initiator",
2845         .proc_name                      = DRV_NAME,
2846         .slave_alloc                    = srp_slave_alloc,
2847         .slave_configure                = srp_slave_configure,
2848         .info                           = srp_target_info,
2849         .queuecommand                   = srp_queuecommand,
2850         .change_queue_depth             = srp_change_queue_depth,
2851         .eh_abort_handler               = srp_abort,
2852         .eh_device_reset_handler        = srp_reset_device,
2853         .eh_host_reset_handler          = srp_reset_host,
2854         .skip_settle_delay              = true,
2855         .sg_tablesize                   = SRP_DEF_SG_TABLESIZE,
2856         .can_queue                      = SRP_DEFAULT_CMD_SQ_SIZE,
2857         .this_id                        = -1,
2858         .cmd_per_lun                    = SRP_DEFAULT_CMD_SQ_SIZE,
2859         .use_clustering                 = ENABLE_CLUSTERING,
2860         .shost_attrs                    = srp_host_attrs,
2861         .track_queue_depth              = 1,
2862 };
2863
2864 static int srp_sdev_count(struct Scsi_Host *host)
2865 {
2866         struct scsi_device *sdev;
2867         int c = 0;
2868
2869         shost_for_each_device(sdev, host)
2870                 c++;
2871
2872         return c;
2873 }
2874
2875 /*
2876  * Return values:
2877  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2878  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2879  *    removal has been scheduled.
2880  * 0 and target->state != SRP_TARGET_REMOVED upon success.
2881  */
2882 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2883 {
2884         struct srp_rport_identifiers ids;
2885         struct srp_rport *rport;
2886
2887         target->state = SRP_TARGET_SCANNING;
2888         sprintf(target->target_name, "SRP.T10:%016llX",
2889                 be64_to_cpu(target->id_ext));
2890
2891         if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2892                 return -ENODEV;
2893
2894         memcpy(ids.port_id, &target->id_ext, 8);
2895         memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2896         ids.roles = SRP_RPORT_ROLE_TARGET;
2897         rport = srp_rport_add(target->scsi_host, &ids);
2898         if (IS_ERR(rport)) {
2899                 scsi_remove_host(target->scsi_host);
2900                 return PTR_ERR(rport);
2901         }
2902
2903         rport->lld_data = target;
2904         target->rport = rport;
2905
2906         spin_lock(&host->target_lock);
2907         list_add_tail(&target->list, &host->target_list);
2908         spin_unlock(&host->target_lock);
2909
2910         scsi_scan_target(&target->scsi_host->shost_gendev,
2911                          0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
2912
2913         if (srp_connected_ch(target) < target->ch_count ||
2914             target->qp_in_error) {
2915                 shost_printk(KERN_INFO, target->scsi_host,
2916                              PFX "SCSI scan failed - removing SCSI host\n");
2917                 srp_queue_remove_work(target);
2918                 goto out;
2919         }
2920
2921         pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2922                  dev_name(&target->scsi_host->shost_gendev),
2923                  srp_sdev_count(target->scsi_host));
2924
2925         spin_lock_irq(&target->lock);
2926         if (target->state == SRP_TARGET_SCANNING)
2927                 target->state = SRP_TARGET_LIVE;
2928         spin_unlock_irq(&target->lock);
2929
2930 out:
2931         return 0;
2932 }
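/*
 * A minimal caller sketch for the contract documented above; it mirrors the
 * real call site in srp_create_target() further down:
 *
 *     ret = srp_add_target(host, target);
 *     if (ret)
 *             goto err_disconnect;
 *     if (target->state != SRP_TARGET_REMOVED)
 *             ... report the new, live target ...
 */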
2933
2934 static void srp_release_dev(struct device *dev)
2935 {
2936         struct srp_host *host =
2937                 container_of(dev, struct srp_host, dev);
2938
2939         complete(&host->released);
2940 }
2941
2942 static struct class srp_class = {
2943         .name    = "infiniband_srp",
2944         .dev_release = srp_release_dev
2945 };
2946
2947 /**
2948  * srp_conn_unique() - check whether the connection to a target is unique
2949  * @host:   SRP host.
2950  * @target: SRP target port.
2951  */
2952 static bool srp_conn_unique(struct srp_host *host,
2953                             struct srp_target_port *target)
2954 {
2955         struct srp_target_port *t;
2956         bool ret = false;
2957
2958         if (target->state == SRP_TARGET_REMOVED)
2959                 goto out;
2960
2961         ret = true;
2962
2963         spin_lock(&host->target_lock);
2964         list_for_each_entry(t, &host->target_list, list) {
2965                 if (t != target &&
2966                     target->id_ext == t->id_ext &&
2967                     target->ioc_guid == t->ioc_guid &&
2968                     target->initiator_ext == t->initiator_ext) {
2969                         ret = false;
2970                         break;
2971                 }
2972         }
2973         spin_unlock(&host->target_lock);
2974
2975 out:
2976         return ret;
2977 }
2978
2979 /*
2980  * Target ports are added by writing
2981  *
2982  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2983  *     pkey=<P_Key>,service_id=<service ID>
2984  *
2985  * to the add_target sysfs attribute.
2986  */
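/*
 * For example (every identifier below is a placeholder; substitute values
 * for the local fabric and target):
 *
 *     echo id_ext=0002c90200402bec,ioc_guid=0002c90200402bec,\
 *     dgid=fe800000000000000002c90200402bed,pkey=ffff,\
 *     service_id=0002c90200402bec \
 *         > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * The "srp-<ibdev>-<port>" directory name is set up in srp_add_port() below.
 */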
2987 enum {
2988         SRP_OPT_ERR             = 0,
2989         SRP_OPT_ID_EXT          = 1 << 0,
2990         SRP_OPT_IOC_GUID        = 1 << 1,
2991         SRP_OPT_DGID            = 1 << 2,
2992         SRP_OPT_PKEY            = 1 << 3,
2993         SRP_OPT_SERVICE_ID      = 1 << 4,
2994         SRP_OPT_MAX_SECT        = 1 << 5,
2995         SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2996         SRP_OPT_IO_CLASS        = 1 << 7,
2997         SRP_OPT_INITIATOR_EXT   = 1 << 8,
2998         SRP_OPT_CMD_SG_ENTRIES  = 1 << 9,
2999         SRP_OPT_ALLOW_EXT_SG    = 1 << 10,
3000         SRP_OPT_SG_TABLESIZE    = 1 << 11,
3001         SRP_OPT_COMP_VECTOR     = 1 << 12,
3002         SRP_OPT_TL_RETRY_COUNT  = 1 << 13,
3003         SRP_OPT_QUEUE_SIZE      = 1 << 14,
3004         SRP_OPT_ALL             = (SRP_OPT_ID_EXT       |
3005                                    SRP_OPT_IOC_GUID     |
3006                                    SRP_OPT_DGID         |
3007                                    SRP_OPT_PKEY         |
3008                                    SRP_OPT_SERVICE_ID),
3009 };
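/*
 * Each token is a distinct bit, so srp_parse_options() can record the
 * options it has seen by OR-ing the match_token() result into opt_mask;
 * (opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL then verifies that all five
 * mandatory options were supplied. A string containing only id_ext and
 * pkey, for instance, yields opt_mask == (SRP_OPT_ID_EXT | SRP_OPT_PKEY)
 * and fails that check.
 */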
3010
3011 static const match_table_t srp_opt_tokens = {
3012         { SRP_OPT_ID_EXT,               "id_ext=%s"             },
3013         { SRP_OPT_IOC_GUID,             "ioc_guid=%s"           },
3014         { SRP_OPT_DGID,                 "dgid=%s"               },
3015         { SRP_OPT_PKEY,                 "pkey=%x"               },
3016         { SRP_OPT_SERVICE_ID,           "service_id=%s"         },
3017         { SRP_OPT_MAX_SECT,             "max_sect=%d"           },
3018         { SRP_OPT_MAX_CMD_PER_LUN,      "max_cmd_per_lun=%d"    },
3019         { SRP_OPT_IO_CLASS,             "io_class=%x"           },
3020         { SRP_OPT_INITIATOR_EXT,        "initiator_ext=%s"      },
3021         { SRP_OPT_CMD_SG_ENTRIES,       "cmd_sg_entries=%u"     },
3022         { SRP_OPT_ALLOW_EXT_SG,         "allow_ext_sg=%u"       },
3023         { SRP_OPT_SG_TABLESIZE,         "sg_tablesize=%u"       },
3024         { SRP_OPT_COMP_VECTOR,          "comp_vector=%u"        },
3025         { SRP_OPT_TL_RETRY_COUNT,       "tl_retry_count=%u"     },
3026         { SRP_OPT_QUEUE_SIZE,           "queue_size=%d"         },
3027         { SRP_OPT_ERR,                  NULL                    }
3028 };
3029
3030 static int srp_parse_options(const char *buf, struct srp_target_port *target)
3031 {
3032         char *options, *sep_opt;
3033         char *p;
3034         char dgid[3];
3035         substring_t args[MAX_OPT_ARGS];
3036         int opt_mask = 0;
3037         int token;
3038         int ret = -EINVAL;
3039         int i;
3040
3041         options = kstrdup(buf, GFP_KERNEL);
3042         if (!options)
3043                 return -ENOMEM;
3044
3045         sep_opt = options;
3046         while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3047                 if (!*p)
3048                         continue;
3049
3050                 token = match_token(p, srp_opt_tokens, args);
3051                 opt_mask |= token;
3052
3053                 switch (token) {
3054                 case SRP_OPT_ID_EXT:
3055                         p = match_strdup(args);
3056                         if (!p) {
3057                                 ret = -ENOMEM;
3058                                 goto out;
3059                         }
3060                         target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3061                         kfree(p);
3062                         break;
3063
3064                 case SRP_OPT_IOC_GUID:
3065                         p = match_strdup(args);
3066                         if (!p) {
3067                                 ret = -ENOMEM;
3068                                 goto out;
3069                         }
3070                         target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3071                         kfree(p);
3072                         break;
3073
3074                 case SRP_OPT_DGID:
3075                         p = match_strdup(args);
3076                         if (!p) {
3077                                 ret = -ENOMEM;
3078                                 goto out;
3079                         }
3080                         if (strlen(p) != 32) {
3081                                 pr_warn("bad dest GID parameter '%s'\n", p);
3082                                 kfree(p);
3083                                 goto out;
3084                         }
3085
3086                         for (i = 0; i < 16; ++i) {
3087                                 strlcpy(dgid, p + i * 2, sizeof(dgid));
3088                                 if (sscanf(dgid, "%hhx",
3089                                            &target->orig_dgid.raw[i]) < 1) {
3090                                         ret = -EINVAL;
3091                                         kfree(p);
3092                                         goto out;
3093                                 }
3094                         }
3095                         kfree(p);
3096                         break;
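                /*
                 * Example of the scan above: for p = "fe80...0001" (32 hex
                 * digits), iteration i == 0 copies "fe" plus a terminating
                 * NUL into dgid[] and sscanf() stores 0xfe in
                 * orig_dgid.raw[0]; the strlcpy() into the 3-byte buffer is
                 * what limits sscanf() to consuming exactly one byte.
                 */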
3097
3098                 case SRP_OPT_PKEY:
3099                         if (match_hex(args, &token)) {
3100                                 pr_warn("bad P_Key parameter '%s'\n", p);
3101                                 goto out;
3102                         }
3103                         target->pkey = cpu_to_be16(token);
3104                         break;
3105
3106                 case SRP_OPT_SERVICE_ID:
3107                         p = match_strdup(args);
3108                         if (!p) {
3109                                 ret = -ENOMEM;
3110                                 goto out;
3111                         }
3112                         target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3113                         kfree(p);
3114                         break;
3115
3116                 case SRP_OPT_MAX_SECT:
3117                         if (match_int(args, &token)) {
3118                                 pr_warn("bad max sect parameter '%s'\n", p);
3119                                 goto out;
3120                         }
3121                         target->scsi_host->max_sectors = token;
3122                         break;
3123
3124                 case SRP_OPT_QUEUE_SIZE:
3125                         if (match_int(args, &token) || token < 1) {
3126                                 pr_warn("bad queue_size parameter '%s'\n", p);
3127                                 goto out;
3128                         }
3129                         target->scsi_host->can_queue = token;
3130                         target->queue_size = token + SRP_RSP_SQ_SIZE +
3131                                              SRP_TSK_MGMT_SQ_SIZE;
3132                         if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3133                                 target->scsi_host->cmd_per_lun = token;
3134                         break;
3135
3136                 case SRP_OPT_MAX_CMD_PER_LUN:
3137                         if (match_int(args, &token) || token < 1) {
3138                                 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3139                                         p);
3140                                 goto out;
3141                         }
3142                         target->scsi_host->cmd_per_lun = token;
3143                         break;
3144
3145                 case SRP_OPT_IO_CLASS:
3146                         if (match_hex(args, &token)) {
3147                                 pr_warn("bad IO class parameter '%s'\n", p);
3148                                 goto out;
3149                         }
3150                         if (token != SRP_REV10_IB_IO_CLASS &&
3151                             token != SRP_REV16A_IB_IO_CLASS) {
3152                                 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3153                                         token, SRP_REV10_IB_IO_CLASS,
3154                                         SRP_REV16A_IB_IO_CLASS);
3155                                 goto out;
3156                         }
3157                         target->io_class = token;
3158                         break;
3159
3160                 case SRP_OPT_INITIATOR_EXT:
3161                         p = match_strdup(args);
3162                         if (!p) {
3163                                 ret = -ENOMEM;
3164                                 goto out;
3165                         }
3166                         target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3167                         kfree(p);
3168                         break;
3169
3170                 case SRP_OPT_CMD_SG_ENTRIES:
3171                         if (match_int(args, &token) || token < 1 || token > 255) {
3172                                 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3173                                         p);
3174                                 goto out;
3175                         }
3176                         target->cmd_sg_cnt = token;
3177                         break;
3178
3179                 case SRP_OPT_ALLOW_EXT_SG:
3180                         if (match_int(args, &token)) {
3181                                 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3182                                 goto out;
3183                         }
3184                         target->allow_ext_sg = !!token;
3185                         break;
3186
3187                 case SRP_OPT_SG_TABLESIZE:
3188                         if (match_int(args, &token) || token < 1 ||
3189                                         token > SG_MAX_SEGMENTS) {
3190                                 pr_warn("bad max sg_tablesize parameter '%s'\n",
3191                                         p);
3192                                 goto out;
3193                         }
3194                         target->sg_tablesize = token;
3195                         break;
3196
3197                 case SRP_OPT_COMP_VECTOR:
3198                         if (match_int(args, &token) || token < 0) {
3199                                 pr_warn("bad comp_vector parameter '%s'\n", p);
3200                                 goto out;
3201                         }
3202                         target->comp_vector = token;
3203                         break;
3204
3205                 case SRP_OPT_TL_RETRY_COUNT:
3206                         if (match_int(args, &token) || token < 2 || token > 7) {
3207                                 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3208                                         p);
3209                                 goto out;
3210                         }
3211                         target->tl_retry_count = token;
3212                         break;
3213
3214                 default:
3215                         pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3216                                 p);
3217                         goto out;
3218                 }
3219         }
3220
3221         if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3222                 ret = 0;
3223         else
3224                 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3225                         if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3226                             !(srp_opt_tokens[i].token & opt_mask))
3227                                 pr_warn("target creation request is missing parameter '%s'\n",
3228                                         srp_opt_tokens[i].pattern);
3229
3230         if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3231             && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3232                 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3233                         target->scsi_host->cmd_per_lun,
3234                         target->scsi_host->can_queue);
3235
3236 out:
3237         kfree(options);
3238         return ret;
3239 }
3240
3241 static ssize_t srp_create_target(struct device *dev,
3242                                  struct device_attribute *attr,
3243                                  const char *buf, size_t count)
3244 {
3245         struct srp_host *host =
3246                 container_of(dev, struct srp_host, dev);
3247         struct Scsi_Host *target_host;
3248         struct srp_target_port *target;
3249         struct srp_rdma_ch *ch;
3250         struct srp_device *srp_dev = host->srp_dev;
3251         struct ib_device *ibdev = srp_dev->dev;
3252         int ret, node_idx, node, cpu, i;
3253         unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3254         bool multich = false;
3255
3256         target_host = scsi_host_alloc(&srp_template,
3257                                       sizeof (struct srp_target_port));
3258         if (!target_host)
3259                 return -ENOMEM;
3260
3261         target_host->transportt  = ib_srp_transport_template;
3262         target_host->max_channel = 0;
3263         target_host->max_id      = 1;
3264         target_host->max_lun     = -1LL;
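        /*
         * sizeof on a member reached through a cast null pointer is evaluated
         * at compile time, so the next line is simply
         * sizeof(((struct srp_cmd *)0)->cdb), i.e. the 16-byte SRP CDB field.
         */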
3265         target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3266
3267         target = host_to_target(target_host);
3268
3269         target->io_class        = SRP_REV16A_IB_IO_CLASS;
3270         target->scsi_host       = target_host;
3271         target->srp_host        = host;
3272         target->lkey            = host->srp_dev->pd->local_dma_lkey;
3273         target->global_mr       = host->srp_dev->global_mr;
3274         target->cmd_sg_cnt      = cmd_sg_entries;
3275         target->sg_tablesize    = indirect_sg_entries ? : cmd_sg_entries;
3276         target->allow_ext_sg    = allow_ext_sg;
3277         target->tl_retry_count  = 7;
3278         target->queue_size      = SRP_DEFAULT_QUEUE_SIZE;
3279
3280         /*
3281          * Prevent the SCSI host from being removed by srp_remove_target()
3282          * before this function returns.
3283          */
3284         scsi_host_get(target->scsi_host);
3285
3286         mutex_lock(&host->add_target_mutex);
3287
3288         ret = srp_parse_options(buf, target);
3289         if (ret)
3290                 goto out;
3291
3292         target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3293
3294         if (!srp_conn_unique(target->srp_host, target)) {
3295                 shost_printk(KERN_INFO, target->scsi_host,
3296                              PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3297                              be64_to_cpu(target->id_ext),
3298                              be64_to_cpu(target->ioc_guid),
3299                              be64_to_cpu(target->initiator_ext));
3300                 ret = -EEXIST;
3301                 goto out;
3302         }
3303
3304         if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3305             target->cmd_sg_cnt < target->sg_tablesize) {
3306                 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3307                 target->sg_tablesize = target->cmd_sg_cnt;
3308         }
3309
3310         if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
3311                 /*
3312                  * FR and FMR can only map one HCA page per entry. If the
3313                  * start address is not aligned on an HCA page boundary, two
3314                  * entries will be used for the head and the tail although
3315                  * these two entries combined contain at most one HCA page of
3316                  * data. Hence the "+ 1" in the calculation below.
3317                  *
3318                  * The indirect data buffer descriptor is contiguous so the
3319                  * memory for that buffer will only be registered if
3320                  * register_always is true. Hence add one to mr_per_cmd if
3321                  * register_always has been set.
3322                  */
3323                 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3324                                   (ilog2(srp_dev->mr_page_size) - 9);
3325                 mr_per_cmd = register_always +
3326                         (target->scsi_host->max_sectors + 1 +
3327                          max_sectors_per_mr - 1) / max_sectors_per_mr;
3328                 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3329                          target->scsi_host->max_sectors,
3330                          srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3331                          max_sectors_per_mr, mr_per_cmd);
3332         }
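        /*
         * Worked example of the calculation above (illustrative numbers):
         * mr_page_size = 4096 makes the shift 12 - 9 = 3, so
         * max_pages_per_mr = 512 gives max_sectors_per_mr = 512 << 3 = 4096;
         * with max_sectors = 1024 and register_always true,
         * mr_per_cmd = 1 + (1024 + 1 + 4095) / 4096 = 2.
         */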
3333
3334         target_host->sg_tablesize = target->sg_tablesize;
3335         target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3336         target->mr_per_cmd = mr_per_cmd;
3337         target->indirect_size = target->sg_tablesize *
3338                                 sizeof (struct srp_direct_buf);
3339         target->max_iu_len = sizeof (struct srp_cmd) +
3340                              sizeof (struct srp_indirect_buf) +
3341                              target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3342
3343         INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3344         INIT_WORK(&target->remove_work, srp_remove_work);
3345         spin_lock_init(&target->lock);
3346         ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
3347         if (ret)
3348                 goto out;
3349
3350         ret = -ENOMEM;
3351         target->ch_count = max_t(unsigned, num_online_nodes(),
3352                                  min(ch_count ? :
3353                                      min(4 * num_online_nodes(),
3354                                          ibdev->num_comp_vectors),
3355                                      num_online_cpus()));
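        /*
         * Illustrative example of the channel count chosen above: on a
         * 2-node, 16-CPU system whose HCA exposes 8 completion vectors,
         * with the ch_count module parameter left at 0, the inner min()
         * gives min(min(4 * 2, 8), 16) = 8 and the outer max() keeps at
         * least one channel per NUMA node, so target->ch_count = 8.
         */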
3356         target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3357                              GFP_KERNEL);
3358         if (!target->ch)
3359                 goto out;
3360
3361         node_idx = 0;
3362         for_each_online_node(node) {
3363                 const int ch_start = (node_idx * target->ch_count /
3364                                       num_online_nodes());
3365                 const int ch_end = ((node_idx + 1) * target->ch_count /
3366                                     num_online_nodes());
3367                 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3368                                       num_online_nodes() + target->comp_vector)
3369                                      % ibdev->num_comp_vectors;
3370                 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3371                                     num_online_nodes() + target->comp_vector)
3372                                    % ibdev->num_comp_vectors;
3373                 int cpu_idx = 0;
3374
3375                 for_each_online_cpu(cpu) {
3376                         if (cpu_to_node(cpu) != node)
3377                                 continue;
3378                         if (ch_start + cpu_idx >= ch_end)
3379                                 continue;
3380                         ch = &target->ch[ch_start + cpu_idx];
3381                         ch->target = target;
3382                         ch->comp_vector = cv_start == cv_end ? cv_start :
3383                                 cv_start + cpu_idx % (cv_end - cv_start);
3384                         spin_lock_init(&ch->lock);
3385                         INIT_LIST_HEAD(&ch->free_tx);
3386                         ret = srp_new_cm_id(ch);
3387                         if (ret)
3388                                 goto err_disconnect;
3389
3390                         ret = srp_create_ch_ib(ch);
3391                         if (ret)
3392                                 goto err_disconnect;
3393
3394                         ret = srp_alloc_req_data(ch);
3395                         if (ret)
3396                                 goto err_disconnect;
3397
3398                         ret = srp_connect_ch(ch, multich);
3399                         if (ret) {
3400                                 shost_printk(KERN_ERR, target->scsi_host,
3401                                              PFX "Connection %d/%d failed\n",
3402                                              ch_start + cpu_idx,
3403                                              target->ch_count);
3404                                 if (node_idx == 0 && cpu_idx == 0) {
3405                                         goto err_disconnect;
3406                                 } else {
3407                                         srp_free_ch_ib(target, ch);
3408                                         srp_free_req_data(target, ch);
3409                                         target->ch_count = ch - target->ch;
3410                                         goto connected;
3411                                 }
3412                         }
3413
3414                         multich = true;
3415                         cpu_idx++;
3416                 }
3417                 node_idx++;
3418         }
3419
3420 connected:
3421         target->scsi_host->nr_hw_queues = target->ch_count;
3422
3423         ret = srp_add_target(host, target);
3424         if (ret)
3425                 goto err_disconnect;
3426
3427         if (target->state != SRP_TARGET_REMOVED) {
3428                 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3429                              "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3430                              be64_to_cpu(target->id_ext),
3431                              be64_to_cpu(target->ioc_guid),
3432                              be16_to_cpu(target->pkey),
3433                              be64_to_cpu(target->service_id),
3434                              target->sgid.raw, target->orig_dgid.raw);
3435         }
3436
3437         ret = count;
3438
3439 out:
3440         mutex_unlock(&host->add_target_mutex);
3441
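        /*
         * The first put balances the scsi_host_get() taken earlier in this
         * function; on failure, the second put also drops the reference held
         * since scsi_host_alloc(), freeing the SCSI host.
         */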
3442         scsi_host_put(target->scsi_host);
3443         if (ret < 0)
3444                 scsi_host_put(target->scsi_host);
3445
3446         return ret;
3447
3448 err_disconnect:
3449         srp_disconnect_target(target);
3450
3451         for (i = 0; i < target->ch_count; i++) {
3452                 ch = &target->ch[i];
3453                 srp_free_ch_ib(target, ch);
3454                 srp_free_req_data(target, ch);
3455         }
3456
3457         kfree(target->ch);
3458         goto out;
3459 }
3460
3461 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3462
3463 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3464                           char *buf)
3465 {
3466         struct srp_host *host = container_of(dev, struct srp_host, dev);
3467
3468         return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3469 }
3470
3471 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3472
3473 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3474                          char *buf)
3475 {
3476         struct srp_host *host = container_of(dev, struct srp_host, dev);
3477
3478         return sprintf(buf, "%d\n", host->port);
3479 }
3480
3481 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3482
3483 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3484 {
3485         struct srp_host *host;
3486
3487         host = kzalloc(sizeof *host, GFP_KERNEL);
3488         if (!host)
3489                 return NULL;
3490
3491         INIT_LIST_HEAD(&host->target_list);
3492         spin_lock_init(&host->target_lock);
3493         init_completion(&host->released);
3494         mutex_init(&host->add_target_mutex);
3495         host->srp_dev = device;
3496         host->port = port;
3497
3498         host->dev.class = &srp_class;
3499         host->dev.parent = device->dev->dma_device;
3500         dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3501
3502         if (device_register(&host->dev))
3503                 goto free_host;
3504         if (device_create_file(&host->dev, &dev_attr_add_target))
3505                 goto err_class;
3506         if (device_create_file(&host->dev, &dev_attr_ibdev))
3507                 goto err_class;
3508         if (device_create_file(&host->dev, &dev_attr_port))
3509                 goto err_class;
3510
3511         return host;
3512
3513 err_class:
3514         device_unregister(&host->dev);
3515
3516 free_host:
3517         kfree(host);
3518
3519         return NULL;
3520 }
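/*
 * A successful srp_add_port() call thus leaves behind a sysfs directory such
 * as /sys/class/infiniband_srp/srp-mlx4_0-1 (device name and port number
 * vary) containing the add_target, ibdev and port attributes defined above.
 */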
3521
3522 static void srp_add_one(struct ib_device *device)
3523 {
3524         struct srp_device *srp_dev;
3525         struct srp_host *host;
3526         int mr_page_shift, p;
3527         u64 max_pages_per_mr;
3528
3529         srp_dev = kzalloc(sizeof *srp_dev, GFP_KERNEL);
3530         if (!srp_dev)
3531                 return;
3532
3533         /*
3534          * Use the smallest page size supported by the HCA, down to a
3535          * minimum of 4096 bytes. We're unlikely to build large sglists
3536          * out of smaller entries.
3537          */
3538         mr_page_shift           = max(12, ffs(device->attrs.page_size_cap) - 1);
3539         srp_dev->mr_page_size   = 1 << mr_page_shift;
3540         srp_dev->mr_page_mask   = ~((u64) srp_dev->mr_page_size - 1);
3541         max_pages_per_mr        = device->attrs.max_mr_size;
3542         do_div(max_pages_per_mr, srp_dev->mr_page_size);
3543         pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3544                  device->attrs.max_mr_size, srp_dev->mr_page_size,
3545                  max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
3546         srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3547                                           max_pages_per_mr);
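        /*
         * Example of the two computations above (illustrative attributes):
         * an HCA reporting page_size_cap = 0xfffff000 has bit 12 as its
         * lowest supported page size, so ffs() - 1 = 12 and
         * mr_page_size = 4096; with max_mr_size = 2 MB,
         * max_pages_per_mr = 2097152 / 4096 = 512 before the clamp to
         * SRP_MAX_PAGES_PER_MR.
         */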
3548
3549         srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3550                             device->map_phys_fmr && device->unmap_fmr);
3551         srp_dev->has_fr = (device->attrs.device_cap_flags &
3552                            IB_DEVICE_MEM_MGT_EXTENSIONS);
3553         if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
3554                 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3555         } else if (!never_register &&
3556                    device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) {
3557                 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3558                                          (!srp_dev->has_fmr || prefer_fr));
3559                 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3560         }
3561
3562         if (srp_dev->use_fast_reg) {
3563                 srp_dev->max_pages_per_mr =
3564                         min_t(u32, srp_dev->max_pages_per_mr,
3565                               device->attrs.max_fast_reg_page_list_len);
3566         }
3567         srp_dev->mr_max_size    = srp_dev->mr_page_size *
3568                                    srp_dev->max_pages_per_mr;
3569         pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3570                  device->name, mr_page_shift, device->attrs.max_mr_size,
3571                  device->attrs.max_fast_reg_page_list_len,
3572                  srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3573
3574         INIT_LIST_HEAD(&srp_dev->dev_list);
3575
3576         srp_dev->dev = device;
3577         srp_dev->pd  = ib_alloc_pd(device);
3578         if (IS_ERR(srp_dev->pd))
3579                 goto free_dev;
3580
3581         if (never_register || !register_always ||
3582             (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3583                 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3584                                                    IB_ACCESS_LOCAL_WRITE |
3585                                                    IB_ACCESS_REMOTE_READ |
3586                                                    IB_ACCESS_REMOTE_WRITE);
3587                 if (IS_ERR(srp_dev->global_mr))
3588                         goto err_pd;
3589         } else {
3590                 srp_dev->global_mr = NULL;
3591         }
3592
3593         for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3594                 host = srp_add_port(srp_dev, p);
3595                 if (host)
3596                         list_add_tail(&host->list, &srp_dev->dev_list);
3597         }
3598
3599         ib_set_client_data(device, &srp_client, srp_dev);
3600         return;
3601
3602 err_pd:
3603         ib_dealloc_pd(srp_dev->pd);
3604
3605 free_dev:
3606         kfree(srp_dev);
3607 }
3608
3609 static void srp_remove_one(struct ib_device *device, void *client_data)
3610 {
3611         struct srp_device *srp_dev;
3612         struct srp_host *host, *tmp_host;
3613         struct srp_target_port *target;
3614
3615         srp_dev = client_data;
3616         if (!srp_dev)
3617                 return;
3618
3619         list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3620                 device_unregister(&host->dev);
3621                 /*
3622                  * Wait for the sysfs entry to go away, so that no new
3623                  * target ports can be created.
3624                  */
3625                 wait_for_completion(&host->released);
3626
3627                 /*
3628                  * Remove all target ports.
3629                  */
3630                 spin_lock(&host->target_lock);
3631                 list_for_each_entry(target, &host->target_list, list)
3632                         srp_queue_remove_work(target);
3633                 spin_unlock(&host->target_lock);
3634
3635                 /*
3636                  * Wait for tl_err and target port removal tasks.
3637                  */
3638                 flush_workqueue(system_long_wq);
3639                 flush_workqueue(srp_remove_wq);
3640
3641                 kfree(host);
3642         }
3643
3644         if (srp_dev->global_mr)
3645                 ib_dereg_mr(srp_dev->global_mr);
3646         ib_dealloc_pd(srp_dev->pd);
3647
3648         kfree(srp_dev);
3649 }
3650
3651 static struct srp_function_template ib_srp_transport_functions = {
3652         .has_rport_state         = true,
3653         .reset_timer_if_blocked  = true,
3654         .reconnect_delay         = &srp_reconnect_delay,
3655         .fast_io_fail_tmo        = &srp_fast_io_fail_tmo,
3656         .dev_loss_tmo            = &srp_dev_loss_tmo,
3657         .reconnect               = srp_rport_reconnect,
3658         .rport_delete            = srp_rport_delete,
3659         .terminate_rport_io      = srp_terminate_io,
3660 };
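/*
 * These callbacks are invoked by the SRP transport class
 * (drivers/scsi/scsi_transport_srp.c): on a transport error it blocks the
 * rport, starts the fast_io_fail and dev_loss timers, and retries
 * .reconnect every srp_reconnect_delay seconds, calling
 * .terminate_rport_io or .rport_delete when the respective timer fires.
 */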
3661
3662 static int __init srp_init_module(void)
3663 {
3664         int ret;
3665
3666         if (srp_sg_tablesize) {
3667                 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3668                 if (!cmd_sg_entries)
3669                         cmd_sg_entries = srp_sg_tablesize;
3670         }
3671
3672         if (!cmd_sg_entries)
3673                 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3674
3675         if (cmd_sg_entries > 255) {
3676                 pr_warn("Clamping cmd_sg_entries to 255\n");
3677                 cmd_sg_entries = 255;
3678         }
3679
3680         if (!indirect_sg_entries)
3681                 indirect_sg_entries = cmd_sg_entries;
3682         else if (indirect_sg_entries < cmd_sg_entries) {
3683                 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3684                         cmd_sg_entries);
3685                 indirect_sg_entries = cmd_sg_entries;
3686         }
3687
3688         srp_remove_wq = create_workqueue("srp_remove");
3689         if (!srp_remove_wq) {
3690                 ret = -ENOMEM;
3691                 goto out;
3692         }
3693
3694         ret = -ENOMEM;
3695         ib_srp_transport_template =
3696                 srp_attach_transport(&ib_srp_transport_functions);
3697         if (!ib_srp_transport_template)
3698                 goto destroy_wq;
3699
3700         ret = class_register(&srp_class);
3701         if (ret) {
3702                 pr_err("couldn't register class infiniband_srp\n");
3703                 goto release_tr;
3704         }
3705
3706         ib_sa_register_client(&srp_sa_client);
3707
3708         ret = ib_register_client(&srp_client);
3709         if (ret) {
3710                 pr_err("couldn't register IB client\n");
3711                 goto unreg_sa;
3712         }
3713
3714 out:
3715         return ret;
3716
3717 unreg_sa:
3718         ib_sa_unregister_client(&srp_sa_client);
3719         class_unregister(&srp_class);
3720
3721 release_tr:
3722         srp_release_transport(ib_srp_transport_template);
3723
3724 destroy_wq:
3725         destroy_workqueue(srp_remove_wq);
3726         goto out;
3727 }
3728
3729 static void __exit srp_cleanup_module(void)
3730 {
3731         ib_unregister_client(&srp_client);
3732         ib_sa_unregister_client(&srp_sa_client);
3733         class_unregister(&srp_class);
3734         srp_release_transport(ib_srp_transport_template);
3735         destroy_workqueue(srp_remove_wq);
3736 }
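/*
 * Note that the teardown order above is the exact reverse of the
 * registration order in srp_init_module(), whose error labels unwind the
 * same sequence partially.
 */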
3737
3738 module_init(srp_init_module);
3739 module_exit(srp_cleanup_module);