/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *	Allocate a struct rd_host for this HBA and hang it off hba->hba_ptr.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

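/*
 * Walk each scatterlist table, free the backing pages and the scatterlist
 * arrays themselves, and return the number of pages released.
 */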
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/*	rd_allocate_sgl_table():
 *
 *	Allocate scatterlist tables and their backing pages, filling each
 *	page with init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

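/*
 * Size the sg table array for rd_page_count pages of backing store, then
 * populate it with zero-filled pages via rd_allocate_sgl_table().
 */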
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

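/*
 * Allocate the scatterlist tables that hold T10-PI protection metadata
 * (prot_length bytes per block); the backing pages are initialized to 0xff.
 */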
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length = 8 bytes of DIF data per block
	 * total sg needed = rd_page_count * (PGSZ / block_size) *
	 *                   prot_length / PGSZ + pad
	 * The PGSZ factors cancel, leaving
	 * rd_page_count * prot_length / block_size + 1.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

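/*
 * Map a ramdisk page index to the rd_dev_sg_table chunk that contains it.
 */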
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		     (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

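/*
 * Main I/O path: memcpy() between the command's scatterlist and the ramdisk
 * backing pages.  For protected commands, DIF metadata is verified against
 * the prot scatterlists before a write and after a read.
 */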
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
					  prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
					 prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

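/*
 * Parse the "rd_pages=" and "rd_nullio=" configfs device parameters.
 */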
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}

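/*
 * Report device capacity as the last addressable LBA in block_size units.
 */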
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

DEF_TB_DEFAULT_ATTRIBS(rd_mcp);

static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
	&rd_mcp_dev_attrib_emulate_model_alias.attr,
	&rd_mcp_dev_attrib_emulate_dpo.attr,
	&rd_mcp_dev_attrib_emulate_fua_write.attr,
	&rd_mcp_dev_attrib_emulate_fua_read.attr,
	&rd_mcp_dev_attrib_emulate_write_cache.attr,
	&rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&rd_mcp_dev_attrib_emulate_tas.attr,
	&rd_mcp_dev_attrib_emulate_tpu.attr,
	&rd_mcp_dev_attrib_emulate_tpws.attr,
	&rd_mcp_dev_attrib_emulate_caw.attr,
	&rd_mcp_dev_attrib_emulate_3pc.attr,
	&rd_mcp_dev_attrib_pi_prot_type.attr,
	&rd_mcp_dev_attrib_hw_pi_prot_type.attr,
	&rd_mcp_dev_attrib_pi_prot_format.attr,
	&rd_mcp_dev_attrib_enforce_pr_isids.attr,
	&rd_mcp_dev_attrib_is_nonrot.attr,
	&rd_mcp_dev_attrib_emulate_rest_reord.attr,
	&rd_mcp_dev_attrib_force_pr_aptpl.attr,
	&rd_mcp_dev_attrib_hw_block_size.attr,
	&rd_mcp_dev_attrib_block_size.attr,
	&rd_mcp_dev_attrib_hw_max_sectors.attr,
	&rd_mcp_dev_attrib_optimal_sectors.attr,
	&rd_mcp_dev_attrib_hw_queue_depth.attr,
	&rd_mcp_dev_attrib_queue_depth.attr,
	&rd_mcp_dev_attrib_max_unmap_lba_count.attr,
	&rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
	&rd_mcp_dev_attrib_unmap_granularity.attr,
	&rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
	&rd_mcp_dev_attrib_max_write_same_len.attr,
	NULL,
};

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
};

int __init rd_module_init(void)
{
	struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
	int ret;

	target_core_setup_sub_cits(&rd_mcp_template);
	tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0)
		return ret;

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}