/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

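/*
 * rd_attach_hba() and rd_detach_hba() manage the per-HBA bookkeeping:
 * a struct rd_host carries the host ID plus a monotonically increasing
 * device ID counter, and is reachable from the HBA via hba->hba_ptr.
 */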
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/*	rd_allocate_sgl_table():
 *
 *	Allocate scatterlist tables and backing pages (filled with
 *	init_payload) for one ramdisk region, either data or protection
 *	space.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		unsigned int chain_entry = 0;

		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		/*
		 * Reserve extra element for chain entry
		 */
		if (sg_per_table < total_sg_needed)
			chain_entry = 1;

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table + chain_entry);

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		if (i > 0) {
			sg_chain(sg_table[i - 1].sg_table,
				 max_sg_per_table + 1, sg);
		}

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

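/*
 * Build the ramdisk data area as an array of scatterlist tables, each
 * covering at most RD_MAX_ALLOCATION_SIZE worth of scatterlist entries.
 * The page_start_offset/page_end_offset bounds recorded per table let
 * rd_get_sg_table() map a virtual page index back to its table.
 */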
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length is the per-block DIF payload (8 bytes). The space
	 * needed is rd_page_count * (PAGE_SIZE / block_size) blocks *
	 * prot_length bytes, divided back into PAGE_SIZE units; the
	 * PAGE_SIZE factors cancel, leaving one extra page of padding.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

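/*
 * Look up the sg table covering a given virtual page index. Because
 * every table except the last holds exactly max_sg_per_table pages,
 * the table index is a simple division; the recorded start/end page
 * offsets are then sanity-checked before the table is returned.
 */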
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

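/*
 * Handle the T10-PI (DIF) side of a read or write: locate the
 * protection scatterlist for the command's LBA range, verify the
 * guard/reference tags via sbc_dif_verify(), and on success copy the
 * protection data between the command and the ramdisk's prot space.
 */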
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	bool need_to_release = false;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

#ifndef CONFIG_ARCH_HAS_SG_CHAIN

	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
				   PAGE_SIZE);

	/*
	 * Allocate temporarily contiguous scatterlist entries if the prot
	 * pages straddle multiple scatterlist tables.
	 */
	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
		int i;

		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
		if (!prot_sg)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		need_to_release = true;
		sg_init_table(prot_sg, prot_npages);

		for (i = 0; i < prot_npages; i++) {
			if (prot_page + i > prot_table->page_end_offset) {
				prot_table = rd_get_prot_table(dev,
							       prot_page + i);
				if (!prot_table) {
					kfree(prot_sg);
					return rc;
				}
				sg_unmark_end(&prot_sg[i - 1]);
			}
			prot_sg[i] = prot_table->sg_table[prot_page + i -
						prot_table->page_start_offset];
		}
	}

#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */

	if (is_read)
		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
				    prot_sg, prot_offset);
	else
		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
				    cmd->t_prot_sg, 0);

	if (!rc)
		sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

	if (need_to_release)
		kfree(prot_sg);

	return rc;
}

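/*
 * Main I/O path. The ramdisk is memcpy-based: translate the LBA into a
 * (page, offset) pair within the sg tables, then iterate the command's
 * data scatterlist with sg_miter and copy page-sized runs in whichever
 * direction data_direction dictates. NULLIO devices complete
 * immediately without touching any backing pages.
 */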
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_TO_DEVICE) {
		rc = rd_do_prot_rw(cmd, false);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);

	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_FROM_DEVICE) {
		rc = rd_do_prot_rw(cmd, true);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

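/*
 * Token table for the configfs control interface. Only two parameters
 * are recognized: rd_pages (backing store size in pages, required) and
 * rd_nullio (discard all I/O without backing pages, optional).
 */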
enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

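/*
 * Parse "rd_pages=N" and "rd_nullio=1" from a configfs control write,
 * e.g. (the path below is illustrative; the exact configfs layout
 * depends on how the target core is configured):
 *
 *   echo "rd_pages=65536,rd_nullio=1" > \
 *     /sys/kernel/config/target/core/rd_mcp_0/ramdisk0/control
 */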
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u  nullio: %d\n",
			rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}

static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static const struct target_backend_ops rd_mcp_ops = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

int __init rd_module_init(void)
{
	return transport_backend_register(&rd_mcp_ops);
}

void rd_module_exit(void)
{
	target_backend_unregister(&rd_mcp_ops);
}