/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine  <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static struct se_subsystem_api iblock_template;

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

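/*
 * Claim the struct block_device named by the udev_path= parameter and
 * translate its request_queue limits (logical block size, max sectors,
 * discard geometry, T10 PI format) into the dev_attrib fields that the
 * core SBC emulation reports to initiators.
 */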
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;

		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = ib_dev->ibd_bio_set;

		if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);

	kfree(ib_dev);
}

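/*
 * READ CAPACITY must be reported in units of the block size exported to
 * the initiator, which may differ from the backing device's logical
 * block size; rescale the last-LBA count accordingly.
 */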
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	/*
	 * Rescale the backend's last-LBA count to the block size exported
	 * to the initiator.  Both sizes are powers of two between 512 and
	 * 4096 bytes, so the conversion reduces to a shift by the log2 of
	 * their ratio.
	 */
	if (block_size > dev->dev_attrib.block_size)
		blocks_long <<= ilog2(block_size / dev->dev_attrib.block_size);
	else
		blocks_long >>= ilog2(dev->dev_attrib.block_size / block_size);

	return blocks_long;
}

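/*
 * Each struct iblock_req carries a reference count of in-flight bios in
 * ibr->pending, plus one reference held by the submission path.  The
 * caller that drops the final reference completes the se_cmd, returning
 * CHECK CONDITION if any bio reported an error.
 */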
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed in err is still zero.
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}

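/*
 * Submit a batch of bios inside a blk_plug section so the block layer
 * has a chance to merge adjacent requests before unplugging.
 */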
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

static sense_reason_t
iblock_do_unmap(struct se_cmd *cmd, void *priv,
		sector_t lba, sector_t nolb)
{
	struct block_device *bdev = priv;
	int ret;

	ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;

	return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
}

static sense_reason_t
iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	sector_t lba = cmd->t_task_lba;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	ret = iblock_do_unmap(cmd, bdev, lba, nolb);
	if (ret)
		return ret;

	target_complete_cmd(cmd, GOOD);
	return 0;
}

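/*
 * Emulate WRITE_SAME by remapping the single-block payload SGL into as
 * many bios as it takes to cover sbc_get_write_same_sectors() sectors,
 * allocating a fresh bio whenever the current one is full.
 */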
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	sector_t sectors = sbc_get_write_same_sectors(cmd);

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

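/*
 * Attach a bio_integrity_payload describing the command's protection
 * information scatterlist, so that T10 PI bytes are passed down to a
 * DIF-capable backing device along with the data bio.
 */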
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct scatterlist *sg;
	int i, rc;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
	if (!bip) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return -ENOMEM;
	}

	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
			 dev->prot_length;
	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {

		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
					    sg->offset);
		if (rc != sg->length) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %d offset: %d\n",
			 sg_page(sg), sg->length, sg->offset);
	}

	return 0;
}

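/*
 * Main READ/WRITE path: select WRITE_FUA vs. plain WRITE from the
 * queue's volatile write cache flags and the initiator's FUA bit,
 * convert the exported LBA into the 512-byte sectors used by the block
 * layer, and fan the data scatterlist out across as many bios as needed.
 */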
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio, *bio_start;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw = 0;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using WRITE_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		if (q->flush_flags & REQ_FUA) {
			if (cmd->se_cmd_flags & SCF_FUA)
				rw = WRITE_FUA;
			else if (!(q->flush_flags & REQ_FLUSH))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else {
			rw = WRITE;
		}
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->dev_attrib.block_size);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_start = bio;
	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		int rc = iblock_alloc_bip(cmd, bio_start);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_write_same_unmap = iblock_execute_write_same_unmap,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return q->flush_flags & REQ_FLUSH;
}

DEF_TB_DEFAULT_ATTRIBS(iblock);

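/*
 * DEF_TB_DEFAULT_ATTRIBS() expands to the default set of backend device
 * attribute definitions; the array below picks which of them are exposed
 * in this backend's configfs dev_attrib group.
 */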
static struct configfs_attribute *iblock_backend_dev_attrs[] = {
	&iblock_dev_attrib_emulate_model_alias.attr,
	&iblock_dev_attrib_emulate_dpo.attr,
	&iblock_dev_attrib_emulate_fua_write.attr,
	&iblock_dev_attrib_emulate_fua_read.attr,
	&iblock_dev_attrib_emulate_write_cache.attr,
	&iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&iblock_dev_attrib_emulate_tas.attr,
	&iblock_dev_attrib_emulate_tpu.attr,
	&iblock_dev_attrib_emulate_tpws.attr,
	&iblock_dev_attrib_emulate_caw.attr,
	&iblock_dev_attrib_emulate_3pc.attr,
	&iblock_dev_attrib_pi_prot_type.attr,
	&iblock_dev_attrib_hw_pi_prot_type.attr,
	&iblock_dev_attrib_pi_prot_format.attr,
	&iblock_dev_attrib_enforce_pr_isids.attr,
	&iblock_dev_attrib_is_nonrot.attr,
	&iblock_dev_attrib_emulate_rest_reord.attr,
	&iblock_dev_attrib_force_pr_aptpl.attr,
	&iblock_dev_attrib_hw_block_size.attr,
	&iblock_dev_attrib_block_size.attr,
	&iblock_dev_attrib_hw_max_sectors.attr,
	&iblock_dev_attrib_optimal_sectors.attr,
	&iblock_dev_attrib_hw_queue_depth.attr,
	&iblock_dev_attrib_queue_depth.attr,
	&iblock_dev_attrib_max_unmap_lba_count.attr,
	&iblock_dev_attrib_max_unmap_block_desc_count.attr,
	&iblock_dev_attrib_unmap_granularity.attr,
	&iblock_dev_attrib_unmap_granularity_alignment.attr,
	&iblock_dev_attrib_max_write_same_len.attr,
	NULL,
};

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
};

static int __init iblock_module_init(void)
{
	struct target_backend_cits *tbc = &iblock_template.tb_cits;

	target_core_setup_sub_cits(&iblock_template);
	tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;

	return transport_subsystem_register(&iblock_template);
}

static void __exit iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);