/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

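/*
 * Illustrative usage sketch (an assumption, not part of this file): a
 * fabric driver resolves the LUN for a newly received command and, on
 * failure, hands back the sense data that the lookup set up above:
 *
 *	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
 *		transport_send_check_condition_and_sense(se_cmd,
 *				se_cmd->scsi_sense_reason, 0);
 *		return;
 *	}
 */
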
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

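/*
 * Illustrative usage sketch (assumed fabric-side flow, not from this
 * file): a TMR request is allocated first and then bound to a LUN:
 *
 *	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, NULL, TMR_LUN_RESET,
 *						GFP_KERNEL);
 *	if (transport_lookup_tmr_lun(se_cmd, unpacked_lun) < 0)
 *		se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
 */
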
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries lun_sep pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	unsigned long flags;

	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*      core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode..
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
			       " already set for demo mode -> explicit"
			       " LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EEXIST;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
			       " not match passed struct se_lun for demo mode"
			       " -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}

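/*
 * For context (illustrative, not from this file): the demo mode ->
 * explicit transition handled above fires when an administrator creates
 * an explicit MappedLUN for an initiator that already had a demo-mode
 * entry, e.g. via configfs (path layout is an assumption):
 *
 *   mkdir $TPG/acls/$INITIATOR_WWN/lun_0
 *   ln -s $TPG/lun/lun_0 $TPG/acls/$INITIATOR_WWN/lun_0/mapped_lun_0
 */
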
/*      core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code          Description
	 * 0h            Reserved
	 * 1h            Relative port 1, historically known as port A
	 * 2h            Relative port 2, historically known as port B
	 * 3h to FFFFh   Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_se_dev = NULL;
}

int target_report_luns(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8, i;

	if (se_cmd->data_length < 16) {
		pr_warn("REPORT LUNS allocation length %u too small\n",
			se_cmd->data_length);
		se_cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}

	buf = transport_kmap_data_sg(se_cmd);
	if (!buf)
		return -ENOMEM;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!se_sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = se_sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((offset + 8) > se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);
	transport_kunmap_data_sg(se_cmd);

	target_complete_cmd(se_cmd, GOOD);
	return 0;
}

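/*
 * Worked example (illustrative): with three mapped LUNs the LUN LIST
 * LENGTH computed above is 3 * 8 = 24 (0x18) bytes, so the 8-byte
 * response header reads 00 00 00 18 00 00 00 00, followed by one
 * 8-byte scsi_lun entry per mapped LUN.
 */
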
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

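/*
 * Worked example (illustrative): with PAGE_SIZE = 4096 and a 512 byte
 * block_size, alignment is 8 sectors, so max_sectors = 1023 rounds
 * down to 1016, while an already aligned 1024 passes through unchanged.
 */
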
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("fua_read emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

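/*
 * Illustrative note (assumed configfs layout, not from this file): the
 * se_dev_set_* setters above and below back the per-device attributes
 * exposed through configfs, so they are driven by plain writes such as:
 *
 *   echo 1 > /sys/kernel/config/target/core/iblock_0/disk0/attrib/emulate_tas
 */
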
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
	       dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->dev_attrib.queue_depth) {
			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
						      dev->dev_attrib.block_size);

	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, fabric_max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use the Linux/SCSI layer to change"
			" block_size for the underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);
	return 0;
}

struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	int rc;

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p))
		return lun_p;

	rc = core_tpg_post_addlun(tpg, lun_p,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}

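/*
 * Illustrative note (assumed configfs layout, not from this file):
 * core_dev_add_lun() is reached when a TPG LUN is linked to a backstore
 * device, e.g.
 *
 *   mkdir /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/lun/lun_0
 *   ln -s /sys/kernel/config/target/core/iblock_0/disk0 \
 *         /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/lun/lun_0/
 */
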
/*      core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/*      core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!nacl) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}

/*      core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("                 ANSI SCSI revision: %02x\n",
		dev->transport->get_device_rev(dev));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->se_hba = hba;
	dev->transport = hba->transport;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->stats_lock);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);

	dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
	dev->dev_attrib.emulate_alua = DA_EMULATE_ALUA;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
	else
		dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;

	return dev;
}

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	dev->dev_flags |= DF_CONFIGURED;

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	core_setup_reservations(dev);

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

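/*
 * Illustrative lifecycle sketch (not from this file; names follow the
 * functions above): a backend device is allocated, parameterized, and
 * configured, mirroring what core_dev_setup_virtual_lun0() does below:
 *
 *	dev = target_alloc_device(hba, "some_dev");
 *	hba->transport->set_configfs_dev_params(dev, params, strlen(params));
 *	ret = target_configure_device(dev);
 *	...
 *	target_free_device(dev);
 */
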
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}