/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
        return container_of(dev, struct rd_dev, dev);
}

static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct rd_host *rd_host;

        rd_host = kzalloc(sizeof(*rd_host), GFP_KERNEL);
        if (!rd_host)
                return -ENOMEM;

        rd_host->rd_host_id = host_id;

        hba->hba_ptr = rd_host;

        pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                RD_HBA_VERSION, TARGET_CORE_VERSION);

        return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
        struct rd_host *rd_host = hba->hba_ptr;

        pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
                " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

        kfree(rd_host);
        hba->hba_ptr = NULL;
}

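/*
 * Free every backing page referenced by the given scatterlist tables,
 * then the scatterlist arrays and the table array itself.  Returns the
 * number of pages released so callers can log how much memory was freed.
 */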
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
                                 u32 sg_table_count)
{
        struct page *pg;
        struct scatterlist *sg;
        u32 i, j, page_count = 0, sg_per_table;

        for (i = 0; i < sg_table_count; i++) {
                sg = sg_table[i].sg_table;
                sg_per_table = sg_table[i].rd_sg_count;

                for (j = 0; j < sg_per_table; j++) {
                        pg = sg_page(&sg[j]);
                        if (pg) {
                                __free_page(pg);
                                page_count++;
                        }
                }
                kfree(sg);
        }

        kfree(sg_table);
        return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
        u32 page_count;

        if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
                return;

        page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
                                          rd_dev->sg_table_count);

        pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
                " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

        rd_dev->sg_table_array = NULL;
        rd_dev->sg_table_count = 0;
}


/*      rd_allocate_sgl_table():
 *
 *      Allocate the scatterlist tables and backing pages for a ramdisk
 *      area, initializing every page to init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
                                 u32 total_sg_needed, unsigned char init_payload)
{
        u32 i = 0, j, page_offset = 0, sg_per_table;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        struct page *pg;
        struct scatterlist *sg;
        unsigned char *p;
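
        /*
         * Allocate scatterlists in chunks of at most max_sg_per_table
         * entries.  Each chunk gets one extra entry when more chunks
         * follow, so sg_chain() can link the tables into a single
         * walkable scatterlist.
         */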
        while (total_sg_needed) {
                unsigned int chain_entry = 0;

                sg_per_table = (total_sg_needed > max_sg_per_table) ?
                        max_sg_per_table : total_sg_needed;

                /*
                 * Reserve extra element for chain entry
                 */
                if (sg_per_table < total_sg_needed)
                        chain_entry = 1;

                sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
                                GFP_KERNEL);
                if (!sg)
                        return -ENOMEM;

                sg_init_table(sg, sg_per_table + chain_entry);

                if (i > 0) {
                        sg_chain(sg_table[i - 1].sg_table,
                                 max_sg_per_table + 1, sg);
                }

                sg_table[i].sg_table = sg;
                sg_table[i].rd_sg_count = sg_per_table;
                sg_table[i].page_start_offset = page_offset;
                sg_table[i++].page_end_offset = (page_offset + sg_per_table)
                                                - 1;

                for (j = 0; j < sg_per_table; j++) {
                        pg = alloc_pages(GFP_KERNEL, 0);
                        if (!pg) {
                                pr_err("Unable to allocate scatterlist"
                                        " pages for struct rd_dev_sg_table\n");
                                return -ENOMEM;
                        }
                        sg_assign_page(&sg[j], pg);
                        sg[j].length = PAGE_SIZE;

                        p = kmap(pg);
                        memset(p, init_payload, PAGE_SIZE);
                        kunmap(pg);
                }

                page_offset += sg_per_table;
                total_sg_needed -= sg_per_table;
        }

        return 0;
}

static int rd_build_device_space(struct rd_dev *rd_dev)
{
        struct rd_dev_sg_table *sg_table;
        u32 sg_tables, total_sg_needed;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        int rc;

        if (rd_dev->rd_page_count <= 0) {
                pr_err("Illegal page count: %u for Ramdisk device\n",
                       rd_dev->rd_page_count);
                return -EINVAL;
        }

        /* Don't need backing pages for NULLIO */
        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;

        total_sg_needed = rd_dev->rd_page_count;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;
        sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
        if (!sg_table)
                return -ENOMEM;

        rd_dev->sg_table_array = sg_table;
        rd_dev->sg_table_count = sg_tables;

        rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
        if (rc)
                return rc;

        pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
                 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                 rd_dev->rd_dev_id, rd_dev->rd_page_count,
                 rd_dev->sg_table_count);

        return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
        u32 page_count;

        if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
                return;

        page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
                                          rd_dev->sg_prot_count);

        pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
                 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                 rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

        rd_dev->sg_prot_array = NULL;
        rd_dev->sg_prot_count = 0;
}

static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
        struct rd_dev_sg_table *sg_table;
        u32 total_sg_needed, sg_tables;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        int rc;

        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;
        /*
         * Each logical block carries prot_length bytes of DIF metadata
         * (8 bytes per block for T10-PI), so the protection space needed is
         *   rd_page_count * (PAGE_SIZE / block_size) * prot_length bytes,
         * i.e. rd_page_count * prot_length / block_size pages once the
         * PAGE_SIZE factors cancel, plus one page of padding.
         */
        total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;
        sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
        if (!sg_table)
                return -ENOMEM;

        rd_dev->sg_prot_array = sg_table;
        rd_dev->sg_prot_count = sg_tables;

        rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
        if (rc)
                return rc;

        pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
                 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

        return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
        struct rd_dev *rd_dev;
        struct rd_host *rd_host = hba->hba_ptr;

        rd_dev = kzalloc(sizeof(*rd_dev), GFP_KERNEL);
        if (!rd_dev)
                return NULL;

        rd_dev->rd_host = rd_host;

        return &rd_dev->dev;
}

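/*
 * Called once the user has finished setting configfs parameters.  The
 * mandatory rd_pages= value must already have been supplied; allocate
 * the backing scatterlist tables and publish the hardware attributes.
 */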
static int rd_configure_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        struct rd_host *rd_host = dev->se_hba->hba_ptr;
        int ret;

        if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
                pr_debug("Missing rd_pages= parameter\n");
                return -EINVAL;
        }

        ret = rd_build_device_space(rd_dev);
        if (ret < 0)
                goto fail;

        dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
        dev->dev_attrib.hw_max_sectors = UINT_MAX;
        dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
        dev->dev_attrib.is_nonrot = 1;

        rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

        pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
                " %u pages in %u tables, %lu total bytes\n",
                rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
                rd_dev->sg_table_count,
                (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

        return 0;

fail:
        rd_release_device_space(rd_dev);
        return ret;
}

static void rd_dev_call_rcu(struct rcu_head *p)
{
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct rd_dev *rd_dev = RD_DEV(dev);

        kfree(rd_dev);
}

static void rd_free_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        rd_release_device_space(rd_dev);
        call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}

static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
        struct rd_dev_sg_table *sg_table;
        u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));

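        /*
         * Tables are filled in order with at most sg_per_table pages each,
         * so page / sg_per_table picks the table expected to hold this
         * page; the start/end offsets are then checked as a sanity guard.
         */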
        i = page / sg_per_table;
        if (i < rd_dev->sg_table_count) {
                sg_table = &rd_dev->sg_table_array[i];
                if ((sg_table->page_start_offset <= page) &&
                    (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
        struct rd_dev_sg_table *sg_table;
        u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));

        i = page / sg_per_table;
        if (i < rd_dev->sg_prot_count) {
                sg_table = &rd_dev->sg_prot_array[i];
                if ((sg_table->page_start_offset <= page) &&
                     (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}

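/*
 * Copy protection information between cmd->t_prot_sg and the ramdisk's
 * protection space, verifying DIF data first when pi_prot_verify is
 * enabled.  is_read selects the direction of the copy.
 */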
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *prot_table;
        struct scatterlist *prot_sg;
        u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
        u32 prot_offset, prot_page;
        u32 prot_npages __maybe_unused;
        u64 tmp;
        sense_reason_t rc = 0;

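        /*
         * Map the starting LBA to a (page, offset) position inside the
         * protection space: each block contributes prot_length bytes, and
         * do_div() returns the remainder within a page while leaving the
         * page index in tmp.
         */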
        tmp = cmd->t_task_lba * se_dev->prot_length;
        prot_offset = do_div(tmp, PAGE_SIZE);
        prot_page = tmp;

        prot_table = rd_get_prot_table(dev, prot_page);
        if (!prot_table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        prot_sg = &prot_table->sg_table[prot_page -
                                        prot_table->page_start_offset];

        if (se_dev->dev_attrib.pi_prot_verify) {
                if (is_read)
                        rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
                                            prot_sg, prot_offset);
                else
                        rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
                                            cmd->t_prot_sg, 0);
        }
        if (!rc)
                sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

        return rc;
}

static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
              enum dma_data_direction data_direction)
{
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *table;
        struct scatterlist *rd_sg;
        struct sg_mapping_iter m;
        u32 rd_offset;
        u32 rd_size;
        u32 rd_page;
        u32 src_len;
        u64 tmp;
        sense_reason_t rc;

        if (dev->rd_flags & RDF_NULLIO) {
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }

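        /*
         * Map the starting LBA to a backing page index and byte offset:
         * do_div() leaves the page index in tmp and returns the remainder
         * within that page.
         */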
        tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        rd_offset = do_div(tmp, PAGE_SIZE);
        rd_page = tmp;
        rd_size = cmd->data_length;

        table = rd_get_sg_table(dev, rd_page);
        if (!table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        rd_sg = &table->sg_table[rd_page - table->page_start_offset];

        pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
                        dev->rd_dev_id,
                        data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
                        cmd->t_task_lba, rd_size, rd_page, rd_offset);

        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_TO_DEVICE) {
                rc = rd_do_prot_rw(cmd, false);
                if (rc)
                        return rc;
        }

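        /*
         * memcpy between the command's scatterlist and the ramdisk backing
         * pages.  sg_mapping_iter walks the command's sgl one mapped chunk
         * at a time, while rd_sg/rd_offset track the current position in
         * the backing store, advancing to the next table when a table's
         * last page has been consumed.
         */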
        src_len = PAGE_SIZE - rd_offset;
        sg_miter_start(&m, sgl, sgl_nents,
                        data_direction == DMA_FROM_DEVICE ?
                                SG_MITER_TO_SG : SG_MITER_FROM_SG);
        while (rd_size) {
                u32 len;
                void *rd_addr;

                sg_miter_next(&m);
                if (!(u32)m.length) {
                        pr_debug("RD[%u]: invalid sgl %p len %zu\n",
                                 dev->rd_dev_id, m.addr, m.length);
                        sg_miter_stop(&m);
                        return TCM_INCORRECT_AMOUNT_OF_DATA;
                }
                len = min((u32)m.length, src_len);
                if (len > rd_size) {
                        pr_debug("RD[%u]: size underrun page %d offset %d "
                                 "size %d\n", dev->rd_dev_id,
                                 rd_page, rd_offset, rd_size);
                        len = rd_size;
                }
                m.consumed = len;

                rd_addr = sg_virt(rd_sg) + rd_offset;

                if (data_direction == DMA_FROM_DEVICE)
                        memcpy(m.addr, rd_addr, len);
                else
                        memcpy(rd_addr, m.addr, len);

                rd_size -= len;
                if (!rd_size)
                        continue;

                src_len -= len;
                if (src_len) {
                        rd_offset += len;
                        continue;
                }

                /* rd page completed, next one please */
                rd_page++;
                rd_offset = 0;
                src_len = PAGE_SIZE;
                if (rd_page <= table->page_end_offset) {
                        rd_sg++;
                        continue;
                }

                table = rd_get_sg_table(dev, rd_page);
                if (!table) {
                        sg_miter_stop(&m);
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }

                /* since we increment, the first sg entry is correct */
                rd_sg = table->sg_table;
        }
        sg_miter_stop(&m);

        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_FROM_DEVICE) {
                rc = rd_do_prot_rw(cmd, true);
                if (rc)
                        return rc;
        }

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

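/*
 * Backend device parameters parsed from the configfs control string,
 * e.g. "rd_pages=65536,rd_nullio=1".  rd_pages (mandatory) sets the
 * ramdisk size in PAGE_SIZE units; rd_nullio=1 discards all I/O without
 * touching backing memory.
 */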
enum {
        Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
        {Opt_rd_pages, "rd_pages=%d"},
        {Opt_rd_nullio, "rd_nullio=%d"},
        {Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_rd_pages:
                        match_int(args, &arg);
                        rd_dev->rd_page_count = arg;
                        pr_debug("RAMDISK: Referencing Page"
                                " Count: %u\n", rd_dev->rd_page_count);
                        rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
                        break;
                case Opt_rd_nullio:
                        match_int(args, &arg);
                        if (arg != 1)
                                break;

                        pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
                        rd_dev->rd_flags |= RDF_NULLIO;
                        break;
                default:
                        break;
                }
        }

        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
                        rd_dev->rd_dev_id);
        bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
                        "  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
                        PAGE_SIZE, rd_dev->sg_table_count,
                        !!(rd_dev->rd_flags & RDF_NULLIO));
        return bl;
}

static sector_t rd_get_blocks(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

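        /* Last addressable LBA: total capacity in blocks, minus one. */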
        unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
                        dev->dev_attrib.block_size) - 1;

        return blocks_long;
}

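/*
 * Allocate a separate protection scatterlist area when a T10-PI
 * protection type has been enabled on the device; otherwise there is
 * nothing to set up.
 */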
static int rd_init_prot(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        if (!dev->dev_attrib.pi_prot_type)
                return 0;

        return rd_build_prot_space(rd_dev, dev->prot_length,
                                   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
        .execute_rw             = rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static const struct target_backend_ops rd_mcp_ops = {
        .name                   = "rd_mcp",
        .inquiry_prod           = "RAMDISK-MCP",
        .inquiry_rev            = RD_MCP_VERSION,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
        .alloc_device           = rd_alloc_device,
        .configure_device       = rd_configure_device,
        .free_device            = rd_free_device,
        .parse_cdb              = rd_parse_cdb,
        .set_configfs_dev_params = rd_set_configfs_dev_params,
        .show_configfs_dev_params = rd_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = rd_get_blocks,
        .init_prot              = rd_init_prot,
        .free_prot              = rd_free_prot,
        .tb_dev_attrib_attrs    = sbc_attrib_attrs,
};

int __init rd_module_init(void)
{
        return transport_backend_register(&rd_mcp_ops);
}

void rd_module_exit(void)
{
        target_backend_unregister(&rd_mcp_ops);
}