/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
26
27 #include <linux/string.h>
28 #include <linux/parser.h>
29 #include <linux/timer.h>
30 #include <linux/slab.h>
31 #include <linux/spinlock.h>
32 #include <scsi/scsi_proto.h>
33
34 #include <target/target_core_base.h>
35 #include <target/target_core_backend.h>
36 #include <target/target_core_backend_configfs.h>
37
38 #include "target_core_rd.h"
39
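/* Convert a generic se_device back to the rd_dev that embeds it. */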
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
        return container_of(dev, struct rd_dev, dev);
}

/*      rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *      Allocate an rd_host for this HBA and stash it in hba->hba_ptr.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct rd_host *rd_host;

        rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
        if (!rd_host) {
                pr_err("Unable to allocate memory for struct rd_host\n");
                return -ENOMEM;
        }

        rd_host->rd_host_id = host_id;

        hba->hba_ptr = rd_host;

        pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

        return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
        struct rd_host *rd_host = hba->hba_ptr;

        pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
                " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

        kfree(rd_host);
        hba->hba_ptr = NULL;
}

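/*
 * Free every page referenced by an array of scatterlist tables, then the
 * scatterlists and the table array itself.  Returns the number of pages
 * released so callers can log totals.
 */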
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
                                 u32 sg_table_count)
{
        struct page *pg;
        struct scatterlist *sg;
        u32 i, j, page_count = 0, sg_per_table;

        for (i = 0; i < sg_table_count; i++) {
                sg = sg_table[i].sg_table;
                sg_per_table = sg_table[i].rd_sg_count;

                for (j = 0; j < sg_per_table; j++) {
                        pg = sg_page(&sg[j]);
                        if (pg) {
                                __free_page(pg);
                                page_count++;
                        }
                }
                kfree(sg);
        }

        kfree(sg_table);
        return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
        u32 page_count;

        if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
                return;

        page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
                                          rd_dev->sg_table_count);

        pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
                " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

        rd_dev->sg_table_array = NULL;
        rd_dev->sg_table_count = 0;
}

/*      rd_allocate_sgl_table():
 *
 *      Allocate scatterlist tables and back each entry with a page
 *      initialized to init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
                                 u32 total_sg_needed, unsigned char init_payload)
{
        u32 i = 0, j, page_offset = 0, sg_per_table;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        struct page *pg;
        struct scatterlist *sg;
        unsigned char *p;

        while (total_sg_needed) {
                unsigned int chain_entry = 0;

                sg_per_table = (total_sg_needed > max_sg_per_table) ?
                        max_sg_per_table : total_sg_needed;

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

                /*
                 * Reserve extra element for chain entry
                 */
                if (sg_per_table < total_sg_needed)
                        chain_entry = 1;

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

                sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
                                GFP_KERNEL);
                if (!sg) {
                        pr_err("Unable to allocate scatterlist array"
                                " for struct rd_dev\n");
                        return -ENOMEM;
                }

                sg_init_table(sg, sg_per_table + chain_entry);

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

                if (i > 0) {
                        sg_chain(sg_table[i - 1].sg_table,
                                 max_sg_per_table + 1, sg);
                }

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

                sg_table[i].sg_table = sg;
                sg_table[i].rd_sg_count = sg_per_table;
                sg_table[i].page_start_offset = page_offset;
                sg_table[i++].page_end_offset = (page_offset + sg_per_table)
                                                - 1;

                for (j = 0; j < sg_per_table; j++) {
                        pg = alloc_pages(GFP_KERNEL, 0);
                        if (!pg) {
                                pr_err("Unable to allocate scatterlist"
                                        " pages for struct rd_dev_sg_table\n");
                                return -ENOMEM;
                        }
                        sg_assign_page(&sg[j], pg);
                        sg[j].length = PAGE_SIZE;

                        p = kmap(pg);
                        memset(p, init_payload, PAGE_SIZE);
                        kunmap(pg);
                }

                page_offset += sg_per_table;
                total_sg_needed -= sg_per_table;
        }

        return 0;
}

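/*
 * Allocate the ramdisk backing store: rd_page_count zero-filled pages
 * spread across as many scatterlist tables as needed.  NULLIO devices
 * keep no backing pages at all.
 */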
static int rd_build_device_space(struct rd_dev *rd_dev)
{
        struct rd_dev_sg_table *sg_table;
        u32 sg_tables, total_sg_needed;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        int rc;

        if (rd_dev->rd_page_count <= 0) {
                pr_err("Illegal page count: %u for Ramdisk device\n",
                       rd_dev->rd_page_count);
                return -EINVAL;
        }

        /* Don't need backing pages for NULLIO */
        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;

        total_sg_needed = rd_dev->rd_page_count;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;

        sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table), GFP_KERNEL);
        if (!sg_table) {
                pr_err("Unable to allocate memory for Ramdisk"
                       " scatterlist tables\n");
                return -ENOMEM;
        }

        rd_dev->sg_table_array = sg_table;
        rd_dev->sg_table_count = sg_tables;

        rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
        if (rc)
                return rc;

        pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
                 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                 rd_dev->rd_dev_id, rd_dev->rd_page_count,
                 rd_dev->sg_table_count);

        return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
        u32 page_count;

        if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
                return;

        page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
                                          rd_dev->sg_prot_count);

        pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
                 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                 rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

        rd_dev->sg_prot_array = NULL;
        rd_dev->sg_prot_count = 0;
}

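/*
 * Allocate backing space for T10-PI protection information, with every
 * byte initialized to 0xff so that unwritten blocks carry the DIF
 * escape values and pass verification.
 */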
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
        struct rd_dev_sg_table *sg_table;
        u32 total_sg_needed, sg_tables;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        int rc;

        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;
        /*
         * prot_length is the DIF payload per block (8 bytes).
         * Pages needed = rd_page_count * (PAGE_SIZE / block_size) *
         *                prot_length / PAGE_SIZE + pad;
         * the PAGE_SIZE factors cancel each other out.
         */
        total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;

        sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table), GFP_KERNEL);
        if (!sg_table) {
                pr_err("Unable to allocate memory for Ramdisk protection"
                       " scatterlist tables\n");
                return -ENOMEM;
        }

        rd_dev->sg_prot_array = sg_table;
        rd_dev->sg_prot_count = sg_tables;

        rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
        if (rc)
                return rc;

        pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
                 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

        return 0;
}

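/* Allocate an rd_dev and hand its embedded se_device back to the core. */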
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
        struct rd_dev *rd_dev;
        struct rd_host *rd_host = hba->hba_ptr;

        rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
        if (!rd_dev) {
                pr_err("Unable to allocate memory for struct rd_dev\n");
                return NULL;
        }

        rd_dev->rd_host = rd_host;

        return &rd_dev->dev;
}

static int rd_configure_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        struct rd_host *rd_host = dev->se_hba->hba_ptr;
        int ret;

        if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
                pr_debug("Missing rd_pages= parameter\n");
                return -EINVAL;
        }

        ret = rd_build_device_space(rd_dev);
        if (ret < 0)
                goto fail;

        dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
        dev->dev_attrib.hw_max_sectors = UINT_MAX;
        dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

        rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

        pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
                " %u pages in %u tables, %lu total bytes\n",
                rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
                rd_dev->sg_table_count,
                (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

        return 0;

fail:
        rd_release_device_space(rd_dev);
        return ret;
}

static void rd_free_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        rd_release_device_space(rd_dev);
        kfree(rd_dev);
}

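/*
 * Map a backing-store page index to the scatterlist table covering it;
 * the fixed entries-per-table count makes this a direct array lookup.
 */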
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
        struct rd_dev_sg_table *sg_table;
        u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));

        i = page / sg_per_table;
        if (i < rd_dev->sg_table_count) {
                sg_table = &rd_dev->sg_table_array[i];
                if ((sg_table->page_start_offset <= page) &&
                    (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
        struct rd_dev_sg_table *sg_table;
        u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));

        i = page / sg_per_table;
        if (i < rd_dev->sg_prot_count) {
                sg_table = &rd_dev->sg_prot_array[i];
                if ((sg_table->page_start_offset <= page) &&
                    (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        pr_err("Unable to locate prot struct rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}

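/*
 * Run a DIF verify callback across the protection scatterlists backing
 * this command's LBA range.  Without arch sg-chaining support, entries
 * that span multiple tables are first copied into one flat array.
 */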
typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int,
                                     unsigned int, struct scatterlist *, int);

static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
{
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *prot_table;
        bool need_to_release = false;
        struct scatterlist *prot_sg;
        u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
        u32 prot_offset, prot_page;
        u32 prot_npages __maybe_unused;
        u64 tmp;
        sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        tmp = cmd->t_task_lba * se_dev->prot_length;
        prot_offset = do_div(tmp, PAGE_SIZE);
        prot_page = tmp;

        prot_table = rd_get_prot_table(dev, prot_page);
        if (!prot_table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        prot_sg = &prot_table->sg_table[prot_page -
                                        prot_table->page_start_offset];

#ifndef CONFIG_ARCH_HAS_SG_CHAIN

        prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
                                   PAGE_SIZE);

        /*
         * Allocate temporary contiguous scatterlist entries if the prot
         * pages straddle multiple scatterlist tables.
         */
        if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
                int i;

                prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
                if (!prot_sg)
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

                need_to_release = true;
                sg_init_table(prot_sg, prot_npages);

                for (i = 0; i < prot_npages; i++) {
                        if (prot_page + i > prot_table->page_end_offset) {
                                prot_table = rd_get_prot_table(dev,
                                                                prot_page + i);
                                if (!prot_table) {
                                        kfree(prot_sg);
                                        return rc;
                                }
                                sg_unmark_end(&prot_sg[i - 1]);
                        }
                        prot_sg[i] = prot_table->sg_table[prot_page + i -
                                                prot_table->page_start_offset];
                }
        }

#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */

        rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset);
        if (need_to_release)
                kfree(prot_sg);

        return rc;
}

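/*
 * Service READ/WRITE by memcpy()ing between the command's scatterlist
 * and the ramdisk backing pages, walking both lists page by page.  DIF
 * protection, when enabled, is verified before writes and after reads.
 */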
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
              enum dma_data_direction data_direction)
{
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *table;
        struct scatterlist *rd_sg;
        struct sg_mapping_iter m;
        u32 rd_offset;
        u32 rd_size;
        u32 rd_page;
        u32 src_len;
        u64 tmp;
        sense_reason_t rc;

        if (dev->rd_flags & RDF_NULLIO) {
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }

        tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        rd_offset = do_div(tmp, PAGE_SIZE);
        rd_page = tmp;
        rd_size = cmd->data_length;

        table = rd_get_sg_table(dev, rd_page);
        if (!table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        rd_sg = &table->sg_table[rd_page - table->page_start_offset];

        pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
                        dev->rd_dev_id,
                        data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
                        cmd->t_task_lba, rd_size, rd_page, rd_offset);

        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_TO_DEVICE) {
                rc = rd_do_prot_rw(cmd, sbc_dif_verify_write);
                if (rc)
                        return rc;
        }

        src_len = PAGE_SIZE - rd_offset;
        sg_miter_start(&m, sgl, sgl_nents,
                        data_direction == DMA_FROM_DEVICE ?
                                SG_MITER_TO_SG : SG_MITER_FROM_SG);
        while (rd_size) {
                u32 len;
                void *rd_addr;

                sg_miter_next(&m);
                if (!(u32)m.length) {
                        pr_debug("RD[%u]: invalid sgl %p len %zu\n",
                                 dev->rd_dev_id, m.addr, m.length);
                        sg_miter_stop(&m);
                        return TCM_INCORRECT_AMOUNT_OF_DATA;
                }
                len = min((u32)m.length, src_len);
                if (len > rd_size) {
                        pr_debug("RD[%u]: size underrun page %d offset %d "
                                 "size %d\n", dev->rd_dev_id,
                                 rd_page, rd_offset, rd_size);
                        len = rd_size;
                }
                m.consumed = len;

                rd_addr = sg_virt(rd_sg) + rd_offset;

                if (data_direction == DMA_FROM_DEVICE)
                        memcpy(m.addr, rd_addr, len);
                else
                        memcpy(rd_addr, m.addr, len);

                rd_size -= len;
                if (!rd_size)
                        continue;

                src_len -= len;
                if (src_len) {
                        rd_offset += len;
                        continue;
                }

                /* rd page completed, next one please */
                rd_page++;
                rd_offset = 0;
                src_len = PAGE_SIZE;
                if (rd_page <= table->page_end_offset) {
                        rd_sg++;
                        continue;
                }

                table = rd_get_sg_table(dev, rd_page);
                if (!table) {
                        sg_miter_stop(&m);
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }

                /* since we increment, the first sg entry is correct */
                rd_sg = table->sg_table;
        }
        sg_miter_stop(&m);

        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_FROM_DEVICE) {
                rc = rd_do_prot_rw(cmd, sbc_dif_verify_read);
                if (rc)
                        return rc;
        }

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

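/* configfs device parameters: rd_pages=<page count> and rd_nullio=<0|1> */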
enum {
        Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
        {Opt_rd_pages, "rd_pages=%d"},
        {Opt_rd_nullio, "rd_nullio=%d"},
        {Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_rd_pages:
                        ret = match_int(args, &arg);
                        if (ret)
                                break;
                        rd_dev->rd_page_count = arg;
                        pr_debug("RAMDISK: Referencing Page"
                                " Count: %u\n", rd_dev->rd_page_count);
                        rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
                        break;
                case Opt_rd_nullio:
                        ret = match_int(args, &arg);
                        if (ret || arg != 1)
                                break;

                        pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
                        rd_dev->rd_flags |= RDF_NULLIO;
                        break;
                default:
                        break;
                }
        }

        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
                        rd_dev->rd_dev_id);
        bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
                        "  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
                        PAGE_SIZE, rd_dev->sg_table_count,
                        !!(rd_dev->rd_flags & RDF_NULLIO));
        return bl;
}

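/* Report the last addressable LBA implied by page count and block size. */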
static sector_t rd_get_blocks(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        unsigned long long blocks_long =
                ((unsigned long long)rd_dev->rd_page_count * PAGE_SIZE /
                 dev->dev_attrib.block_size) - 1;

        return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        if (!dev->dev_attrib.pi_prot_type)
                return 0;

        return rd_build_prot_space(rd_dev, dev->prot_length,
                                   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
        .execute_rw             = rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

DEF_TB_DEFAULT_ATTRIBS(rd_mcp);

static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
        &rd_mcp_dev_attrib_emulate_model_alias.attr,
        &rd_mcp_dev_attrib_emulate_dpo.attr,
        &rd_mcp_dev_attrib_emulate_fua_write.attr,
        &rd_mcp_dev_attrib_emulate_fua_read.attr,
        &rd_mcp_dev_attrib_emulate_write_cache.attr,
        &rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
        &rd_mcp_dev_attrib_emulate_tas.attr,
        &rd_mcp_dev_attrib_emulate_tpu.attr,
        &rd_mcp_dev_attrib_emulate_tpws.attr,
        &rd_mcp_dev_attrib_emulate_caw.attr,
        &rd_mcp_dev_attrib_emulate_3pc.attr,
        &rd_mcp_dev_attrib_pi_prot_type.attr,
        &rd_mcp_dev_attrib_hw_pi_prot_type.attr,
        &rd_mcp_dev_attrib_pi_prot_format.attr,
        &rd_mcp_dev_attrib_enforce_pr_isids.attr,
        &rd_mcp_dev_attrib_is_nonrot.attr,
        &rd_mcp_dev_attrib_emulate_rest_reord.attr,
        &rd_mcp_dev_attrib_force_pr_aptpl.attr,
        &rd_mcp_dev_attrib_hw_block_size.attr,
        &rd_mcp_dev_attrib_block_size.attr,
        &rd_mcp_dev_attrib_hw_max_sectors.attr,
        &rd_mcp_dev_attrib_optimal_sectors.attr,
        &rd_mcp_dev_attrib_hw_queue_depth.attr,
        &rd_mcp_dev_attrib_queue_depth.attr,
        &rd_mcp_dev_attrib_max_unmap_lba_count.attr,
        &rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
        &rd_mcp_dev_attrib_unmap_granularity.attr,
        &rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
        &rd_mcp_dev_attrib_max_write_same_len.attr,
        NULL,
};

static struct se_subsystem_api rd_mcp_template = {
        .name                   = "rd_mcp",
        .inquiry_prod           = "RAMDISK-MCP",
        .inquiry_rev            = RD_MCP_VERSION,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_VDEV,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
        .alloc_device           = rd_alloc_device,
        .configure_device       = rd_configure_device,
        .free_device            = rd_free_device,
        .parse_cdb              = rd_parse_cdb,
        .set_configfs_dev_params = rd_set_configfs_dev_params,
        .show_configfs_dev_params = rd_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = rd_get_blocks,
        .init_prot              = rd_init_prot,
        .free_prot              = rd_free_prot,
};

int __init rd_module_init(void)
{
        struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
        int ret;

        target_core_setup_sub_cits(&rd_mcp_template);
        tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;

        ret = transport_subsystem_register(&rd_mcp_template);
        if (ret < 0)
                return ret;

        return 0;
}

void rd_module_exit(void)
{
        transport_subsystem_release(&rd_mcp_template);
}