/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

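/*
 * fcloop: a software loopback "fabric" for NVMe over Fibre Channel.
 *
 * The module registers ports with both the NVMe-FC host (initiator)
 * transport and the NVMe-FC target transport and shuttles LS and FCP
 * requests between them in memory, so the transport code paths can be
 * exercised without FC hardware.  Ports are created and deleted through
 * write-only attributes on a control device (see the DEVICE_ATTR
 * definitions near the end of this file).
 */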
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

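/*
 * Parse a comma-separated option string, as written to one of the sysfs
 * control attributes, into a fcloop_ctrl_options structure, recording
 * which options were seen in opts->mask.
 */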
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


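/*
 * Required option masks for each port type: local and target ports need
 * wwnn/wwpn; remote ports additionally need the wwnn/wwpn of the local
 * port (lpwwnn/lpwwpn) they attach to.
 */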
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define ALL_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \
			 NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

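/*
 * Per-port bookkeeping: an fcloop_lport wraps a registered host-side
 * local port; an fcloop_nport describes one wwnn/wwpn pairing and owns
 * the remote-port (host view) and target-port (target view) halves that
 * are looped back onto each other.
 */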
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	struct completion rport_unreg_done;
	struct completion tport_unreg_done;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	u16				status;
	struct work_struct		work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};


static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}


/*
 * Transmit of LS RSP done (e.g. buffers all set).  Call back up the
 * initiator's "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

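/*
 * Host-side LS request entry point: if a target port is attached to this
 * remote port, hand the request to the target transport; otherwise fail
 * it with -ECONNREFUSED via the done work item.
 */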
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

/*
 * FCP I/O operation done.  Call back up the initiator's "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, work);
	struct fcloop_tport *tport = tfcp_req->tport;
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;

	if (tport->remoteport) {
		fcpreq->status = tfcp_req->status;
		fcpreq->done(fcpreq);
	}
}


static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = fcpreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);

	if (!rport->targetport) {
		tfcp_req->status = NVME_SC_FC_TRANSPORT_ERROR;
		schedule_work(&tfcp_req->work);
		return ret;
	}

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;

	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
				 fcpreq->cmdaddr, fcpreq->cmdlen);

	return ret;
}

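/*
 * Copy "length" bytes between the target-side scatterlist (data_sg) and
 * the initiator I/O scatterlist (io_sg).  The transfer offset is applied
 * to the initiator list; on NVMET_FCOP_WRITEDATA data is copied from the
 * initiator buffers into data_sg, otherwise from data_sg into the
 * initiator buffers.
 */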
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

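/*
 * Target-side FCP operation handler.  Simulates the wire by copying data
 * between the two sides for WRITEDATA/READDATA ops and copying the
 * response buffer for RSP ops, then completes the target request.  For
 * ops that finish the exchange the host-side done work is queued.
 */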
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0;
	u8 op = tgt_fcpreq->op;

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl,
					tgt_fcpreq->offset, xfrlen);
		fcpreq->transferred_length += xfrlen;
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl,
					tgt_fcpreq->offset, xfrlen);
		fcpreq->transferred_length += xfrlen;
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
				fcpreq->rsplen : tgt_fcpreq->rsplen);
		memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
		if (rsplen < tgt_fcpreq->rsplen)
			fcp_err = -E2BIG;
		fcpreq->rcv_rsplen = rsplen;
		fcpreq->status = 0;
		tfcp_req->status = 0;
		break;

	case NVMET_FCOP_ABORT:
		tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	if ((!fcp_err) && (op == NVMET_FCOP_RSP ||
			op == NVMET_FCOP_READDATA_RSP ||
			op == NVMET_FCOP_ABORT))
		schedule_work(&tfcp_req->work);

	return 0;
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport *lport = localport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&rport->nport->rport_unreg_done);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&tport->nport->tport_unreg_done);
}

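/*
 * Fabricated "HBA" limits advertised to both transports, and the host-
 * and target-side templates that wire up the fcloop callbacks and the
 * per-object private-data sizes.
 */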
#define FCLOOP_HW_QUEUES	4
#define FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_fcpreq),
};

struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= NVMET_FCTGTFEAT_READDATA_RSP |
				  NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

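/*
 * "add_local_port" attribute: parse wwnn/wwpn (plus optional role and FC
 * address), register a host-side local port with the FC transport and
 * track it on fcloop_lports.
 */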
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* were all the required options given? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		unsigned long flags;

		/* success */
		lport = localport->private;
		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);

		/* mark all of the input buffer consumed */
		ret = count;
	}

out_free_opts:
	kfree(opts);
	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

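/*
 * Allocate or look up the fcloop_nport for a remote- or target-port
 * create request.  The wwnn/wwpn must not clash with a local port, and
 * remote ports must name an existing local port via lpwwnn/lpwwpn.  If
 * an nport with the same names already exists it is reused for the
 * complementary side, otherwise a new one is added to fcloop_nports.
 */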
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* were all the required options given? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	int ret;

	if (!rport)
		return -EALREADY;

	init_completion(&nport->rport_unreg_done);

	ret = nvme_fc_unregister_remoteport(rport->remoteport);
	if (ret)
		return ret;

	wait_for_completion(&nport->rport_unreg_done);

	fcloop_nport_put(nport);

	return ret;
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __wait_remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	int ret;

	if (!tport)
		return -EALREADY;

	init_completion(&nport->tport_unreg_done);

	ret = nvmet_fc_unregister_targetport(tport->targetport);
	if (ret)
		return ret;

	wait_for_completion(&nport->tport_unreg_done);

	fcloop_nport_put(nport);

	return ret;
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __wait_targetport_unreg(nport, tport);

	return ret ? ret : count;
}


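/*
 * Write-only control attributes, exposed on the "ctl" device created by
 * fcloop_init() (with a default sysfs mount this would typically appear
 * under /sys/class/fcloop/ctl/).  A hypothetical example of wiring up a
 * loopback pair (wwnn/wwpn values purely illustrative) might look like:
 *
 *   echo "wwnn=0x10000001,wwpn=0x20000001" > add_local_port
 *   echo "wwnn=0x10000002,wwpn=0x20000002" > add_target_port
 *   echo "wwnn=0x10000002,wwpn=0x20000002,lpwwnn=0x10000001,lpwwpn=0x20000001" > add_remote_port
 */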
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

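/*
 * Module teardown: unlink and unregister every remaining target, remote
 * and local port (dropping fcloop_lock around the blocking unregister
 * waits), then remove the control device and class.
 */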
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __wait_remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");