/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
        return container_of(dev, struct rd_dev, dev);
}

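/*
 * Allocate a struct rd_host for this HBA and stash it in hba->hba_ptr;
 * rd_detach_hba() below frees it again when the HBA is released.
 */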
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct rd_host *rd_host;

        rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
        if (!rd_host) {
                pr_err("Unable to allocate memory for struct rd_host\n");
                return -ENOMEM;
        }

        rd_host->rd_host_id = host_id;

        hba->hba_ptr = rd_host;

        pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                RD_HBA_VERSION, TARGET_CORE_VERSION);

        return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
        struct rd_host *rd_host = hba->hba_ptr;

        pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
                " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

        kfree(rd_host);
        hba->hba_ptr = NULL;
}

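/*
 * Free every backing page referenced by the given scatterlist tables,
 * then the scatterlists and the table array itself.  Returns the number
 * of pages released so callers can log it.
 */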
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
                                 u32 sg_table_count)
{
        struct page *pg;
        struct scatterlist *sg;
        u32 i, j, page_count = 0, sg_per_table;

        for (i = 0; i < sg_table_count; i++) {
                sg = sg_table[i].sg_table;
                sg_per_table = sg_table[i].rd_sg_count;

                for (j = 0; j < sg_per_table; j++) {
                        pg = sg_page(&sg[j]);
                        if (pg) {
                                __free_page(pg);
                                page_count++;
                        }
                }
                kfree(sg);
        }

        kfree(sg_table);
        return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
        u32 page_count;

        if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
                return;

        page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
                                          rd_dev->sg_table_count);

        pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
                " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

        rd_dev->sg_table_array = NULL;
        rd_dev->sg_table_count = 0;
}

/*      rd_allocate_sgl_table():
 *
 *      Allocate the scatterlist tables described by sg_table along with
 *      their backing pages, chain consecutive tables together, and fill
 *      every page with init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
                                 u32 total_sg_needed, unsigned char init_payload)
{
        u32 i = 0, j, page_offset = 0, sg_per_table;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        struct page *pg;
        struct scatterlist *sg;
        unsigned char *p;

        while (total_sg_needed) {
                unsigned int chain_entry = 0;

                sg_per_table = (total_sg_needed > max_sg_per_table) ?
                        max_sg_per_table : total_sg_needed;

                /*
                 * Reserve extra element for chain entry
                 */
                if (sg_per_table < total_sg_needed)
                        chain_entry = 1;

                sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
                                GFP_KERNEL);
                if (!sg) {
                        pr_err("Unable to allocate scatterlist array"
                                " for struct rd_dev\n");
                        return -ENOMEM;
                }

                sg_init_table(sg, sg_per_table + chain_entry);

                if (i > 0) {
                        sg_chain(sg_table[i - 1].sg_table,
                                 max_sg_per_table + 1, sg);
                }

                sg_table[i].sg_table = sg;
                sg_table[i].rd_sg_count = sg_per_table;
                sg_table[i].page_start_offset = page_offset;
                sg_table[i++].page_end_offset = (page_offset + sg_per_table)
                                                - 1;

                for (j = 0; j < sg_per_table; j++) {
                        pg = alloc_pages(GFP_KERNEL, 0);
                        if (!pg) {
                                pr_err("Unable to allocate scatterlist"
                                        " pages for struct rd_dev_sg_table\n");
                                return -ENOMEM;
                        }
                        sg_assign_page(&sg[j], pg);
                        sg[j].length = PAGE_SIZE;

                        p = kmap(pg);
                        memset(p, init_payload, PAGE_SIZE);
                        kunmap(pg);
                }

                page_offset += sg_per_table;
                total_sg_needed -= sg_per_table;
        }

        return 0;
}

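/*
 * Allocate the zero-filled data backing store for a ramdisk device:
 * rd_page_count pages spread across as many rd_dev_sg_table entries as
 * needed.  NULLIO devices skip the allocation entirely.
 */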
static int rd_build_device_space(struct rd_dev *rd_dev)
{
        struct rd_dev_sg_table *sg_table;
        u32 sg_tables, total_sg_needed;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        int rc;

        if (rd_dev->rd_page_count <= 0) {
                pr_err("Illegal page count: %u for Ramdisk device\n",
                       rd_dev->rd_page_count);
                return -EINVAL;
        }

        /* Don't need backing pages for NULLIO */
        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;

        total_sg_needed = rd_dev->rd_page_count;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;

        sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
        if (!sg_table) {
                pr_err("Unable to allocate memory for Ramdisk"
                       " scatterlist tables\n");
                return -ENOMEM;
        }

        rd_dev->sg_table_array = sg_table;
        rd_dev->sg_table_count = sg_tables;

        rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
        if (rc)
                return rc;

        pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
                 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                 rd_dev->rd_dev_id, rd_dev->rd_page_count,
                 rd_dev->sg_table_count);

        return 0;
}

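/*
 * Release the DIF protection backing store, if one was allocated.
 */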
static void rd_release_prot_space(struct rd_dev *rd_dev)
{
        u32 page_count;

        if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
                return;

        page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
                                          rd_dev->sg_prot_count);

        pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
                 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                 rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

        rd_dev->sg_prot_array = NULL;
        rd_dev->sg_prot_count = 0;
}

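/*
 * Allocate the 0xff-initialized DIF protection backing store, sized from
 * the data page count, the logical block size, and the per-block
 * protection interval length.
 */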
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
        struct rd_dev_sg_table *sg_table;
        u32 total_sg_needed, sg_tables;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        int rc;

        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;
        /*
         * prot_length is the number of DIF bytes per logical block (8).
         *
         * Pages of protection data needed:
         *   rd_page_count * (PAGE_SIZE / block_size) * (prot_length / PAGE_SIZE) + pad
         * The PAGE_SIZE factors cancel, leaving:
         *   rd_page_count * prot_length / block_size + pad
         */
        total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;

        sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
        if (!sg_table) {
                pr_err("Unable to allocate memory for Ramdisk protection"
                       " scatterlist tables\n");
                return -ENOMEM;
        }

        rd_dev->sg_prot_array = sg_table;
        rd_dev->sg_prot_count = sg_tables;

        rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
        if (rc)
                return rc;

        pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
                 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

        return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
        struct rd_dev *rd_dev;
        struct rd_host *rd_host = hba->hba_ptr;

        rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
        if (!rd_dev) {
                pr_err("Unable to allocate memory for struct rd_dev\n");
                return NULL;
        }

        rd_dev->rd_host = rd_host;

        return &rd_dev->dev;
}

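/*
 * Validate that the mandatory rd_pages= parameter was supplied, build the
 * backing store, and fill in the hardware attributes reported to the core.
 */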
static int rd_configure_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        struct rd_host *rd_host = dev->se_hba->hba_ptr;
        int ret;

        if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
                pr_debug("Missing rd_pages= parameter\n");
                return -EINVAL;
        }

        ret = rd_build_device_space(rd_dev);
        if (ret < 0)
                goto fail;

        dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
        dev->dev_attrib.hw_max_sectors = UINT_MAX;
        dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
        dev->dev_attrib.is_nonrot = 1;

        rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

        pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
                " %u pages in %u tables, %lu total bytes\n",
                rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
                rd_dev->sg_table_count,
                (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

        return 0;

fail:
        rd_release_device_space(rd_dev);
        return ret;
}

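/*
 * struct se_device is embedded in struct rd_dev, so the final kfree() of
 * rd_dev is deferred through call_rcu() until an RCU grace period has
 * elapsed; the data backing store is released immediately.
 */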
static void rd_dev_call_rcu(struct rcu_head *p)
{
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct rd_dev *rd_dev = RD_DEV(dev);

        kfree(rd_dev);
}

static void rd_free_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        rd_release_device_space(rd_dev);
        call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}

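/*
 * Map a backing-store page index to the rd_dev_sg_table that covers it.
 * Each table spans at most max_sg_per_table pages, so the table index is
 * page / sg_per_table; the start/end offsets are then verified.
 */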
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
        struct rd_dev_sg_table *sg_table;
        u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));

        i = page / sg_per_table;
        if (i < rd_dev->sg_table_count) {
                sg_table = &rd_dev->sg_table_array[i];
                if ((sg_table->page_start_offset <= page) &&
                    (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
        struct rd_dev_sg_table *sg_table;
        u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));

        i = page / sg_per_table;
        if (i < rd_dev->sg_prot_count) {
                sg_table = &rd_dev->sg_prot_array[i];
                if ((sg_table->page_start_offset <= page) &&
                     (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}

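/*
 * Verify and copy DIF protection information for a command.  The starting
 * protection page/offset is derived from the LBA, the per-block protection
 * interval length, and PAGE_SIZE; verification runs against the ramdisk's
 * protection scatterlist on reads and against cmd->t_prot_sg on writes.
 */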
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *prot_table;
        struct scatterlist *prot_sg;
        u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
        u32 prot_offset, prot_page;
        u32 prot_npages __maybe_unused;
        u64 tmp;
        sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        tmp = cmd->t_task_lba * se_dev->prot_length;
        prot_offset = do_div(tmp, PAGE_SIZE);
        prot_page = tmp;

        prot_table = rd_get_prot_table(dev, prot_page);
        if (!prot_table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        prot_sg = &prot_table->sg_table[prot_page -
                                        prot_table->page_start_offset];

        if (is_read)
                rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
                                    prot_sg, prot_offset);
        else
                rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
                                    cmd->t_prot_sg, 0);

        if (!rc)
                sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

        return rc;
}

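/*
 * Main I/O path: memcpy between the command's scatterlist and the ramdisk's
 * backing pages.  The starting page and offset are derived from the LBA and
 * block size; an sg mapping iterator walks the command's SGL while rd_sg and
 * rd_offset walk the backing store, crossing table boundaries as needed.
 * DIF protection is checked before writes and after reads when enabled.
 */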
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
              enum dma_data_direction data_direction)
{
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *table;
        struct scatterlist *rd_sg;
        struct sg_mapping_iter m;
        u32 rd_offset;
        u32 rd_size;
        u32 rd_page;
        u32 src_len;
        u64 tmp;
        sense_reason_t rc;

        if (dev->rd_flags & RDF_NULLIO) {
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }

        tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        rd_offset = do_div(tmp, PAGE_SIZE);
        rd_page = tmp;
        rd_size = cmd->data_length;

        table = rd_get_sg_table(dev, rd_page);
        if (!table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        rd_sg = &table->sg_table[rd_page - table->page_start_offset];

        pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
                        dev->rd_dev_id,
                        data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
                        cmd->t_task_lba, rd_size, rd_page, rd_offset);

        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_TO_DEVICE) {
                rc = rd_do_prot_rw(cmd, false);
                if (rc)
                        return rc;
        }

        src_len = PAGE_SIZE - rd_offset;
        sg_miter_start(&m, sgl, sgl_nents,
                        data_direction == DMA_FROM_DEVICE ?
                                SG_MITER_TO_SG : SG_MITER_FROM_SG);
        while (rd_size) {
                u32 len;
                void *rd_addr;

                sg_miter_next(&m);
                if (!(u32)m.length) {
                        pr_debug("RD[%u]: invalid sgl %p len %zu\n",
                                 dev->rd_dev_id, m.addr, m.length);
                        sg_miter_stop(&m);
                        return TCM_INCORRECT_AMOUNT_OF_DATA;
                }
                len = min((u32)m.length, src_len);
                if (len > rd_size) {
                        pr_debug("RD[%u]: size underrun page %d offset %d "
                                 "size %d\n", dev->rd_dev_id,
                                 rd_page, rd_offset, rd_size);
                        len = rd_size;
                }
                m.consumed = len;

                rd_addr = sg_virt(rd_sg) + rd_offset;

                if (data_direction == DMA_FROM_DEVICE)
                        memcpy(m.addr, rd_addr, len);
                else
                        memcpy(rd_addr, m.addr, len);

                rd_size -= len;
                if (!rd_size)
                        continue;

                src_len -= len;
                if (src_len) {
                        rd_offset += len;
                        continue;
                }

                /* rd page completed, next one please */
                rd_page++;
                rd_offset = 0;
                src_len = PAGE_SIZE;
                if (rd_page <= table->page_end_offset) {
                        rd_sg++;
                        continue;
                }

                table = rd_get_sg_table(dev, rd_page);
                if (!table) {
                        sg_miter_stop(&m);
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }

                /* since we increment, the first sg entry is correct */
                rd_sg = table->sg_table;
        }
        sg_miter_stop(&m);

        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_FROM_DEVICE) {
                rc = rd_do_prot_rw(cmd, true);
                if (rc)
                        return rc;
        }

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

enum {
        Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
        {Opt_rd_pages, "rd_pages=%d"},
        {Opt_rd_nullio, "rd_nullio=%d"},
        {Opt_err, NULL}
};

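/*
 * Parse the comma/newline separated configfs parameter string.  Supported
 * options are rd_pages=<count> (backing store size in pages, mandatory) and
 * rd_nullio=1 (complete all I/O without touching memory).
 */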
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_rd_pages:
                        match_int(args, &arg);
                        rd_dev->rd_page_count = arg;
                        pr_debug("RAMDISK: Referencing Page"
                                " Count: %u\n", rd_dev->rd_page_count);
                        rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
                        break;
                case Opt_rd_nullio:
                        match_int(args, &arg);
                        if (arg != 1)
                                break;

                        pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
                        rd_dev->rd_flags |= RDF_NULLIO;
                        break;
                default:
                        break;
                }
        }

        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
                        rd_dev->rd_dev_id);
        bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
                        "  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
                        PAGE_SIZE, rd_dev->sg_table_count,
                        !!(rd_dev->rd_flags & RDF_NULLIO));
        return bl;
}

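/*
 * Report the highest addressable LBA: total backing-store bytes divided by
 * the configured block size, minus one.
 */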
static sector_t rd_get_blocks(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
                        dev->dev_attrib.block_size) - 1;

        return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        if (!dev->dev_attrib.pi_prot_type)
                return 0;

        return rd_build_prot_space(rd_dev, dev->prot_length,
                                   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
        .execute_rw             = rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static const struct target_backend_ops rd_mcp_ops = {
        .name                   = "rd_mcp",
        .inquiry_prod           = "RAMDISK-MCP",
        .inquiry_rev            = RD_MCP_VERSION,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
        .alloc_device           = rd_alloc_device,
        .configure_device       = rd_configure_device,
        .free_device            = rd_free_device,
        .parse_cdb              = rd_parse_cdb,
        .set_configfs_dev_params = rd_set_configfs_dev_params,
        .show_configfs_dev_params = rd_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = rd_get_blocks,
        .init_prot              = rd_init_prot,
        .free_prot              = rd_free_prot,
        .tb_dev_attrib_attrs    = sbc_attrib_attrs,
};

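/*
 * Entry points: register and unregister the rd_mcp backend with the
 * target core.
 */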
int __init rd_module_init(void)
{
        return transport_backend_register(&rd_mcp_ops);
}

void rd_module_exit(void)
{
        target_backend_unregister(&rd_mcp_ops);
}