1 /*******************************************************************************
2 * Filename: target_core_transport.c
4 * This file contains the Generic Target Engine Core.
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 * Nicholas A. Bellinger <nab@kernel.org>
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 ******************************************************************************/
29 #include <linux/net.h>
30 #include <linux/delay.h>
31 #include <linux/string.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/blkdev.h>
35 #include <linux/spinlock.h>
36 #include <linux/kthread.h>
38 #include <linux/cdrom.h>
39 #include <linux/module.h>
40 #include <asm/unaligned.h>
43 #include <scsi/scsi.h>
44 #include <scsi/scsi_cmnd.h>
45 #include <scsi/scsi_tcq.h>
47 #include <target/target_core_base.h>
48 #include <target/target_core_device.h>
49 #include <target/target_core_tmr.h>
50 #include <target/target_core_tpg.h>
51 #include <target/target_core_transport.h>
52 #include <target/target_core_fabric_ops.h>
53 #include <target/target_core_configfs.h>
55 #include "target_core_internal.h"
56 #include "target_core_alua.h"
57 #include "target_core_pr.h"
58 #include "target_core_ua.h"
60 static int sub_api_initialized;
62 static struct workqueue_struct *target_completion_wq;
63 static struct kmem_cache *se_sess_cache;
64 struct kmem_cache *se_tmr_req_cache;
65 struct kmem_cache *se_ua_cache;
66 struct kmem_cache *t10_pr_reg_cache;
67 struct kmem_cache *t10_alua_lu_gp_cache;
68 struct kmem_cache *t10_alua_lu_gp_mem_cache;
69 struct kmem_cache *t10_alua_tg_pt_gp_cache;
70 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
72 static int transport_generic_write_pending(struct se_cmd *);
73 static int transport_processing_thread(void *param);
74 static int __transport_execute_tasks(struct se_device *dev);
75 static void transport_complete_task_attr(struct se_cmd *cmd);
76 static void transport_handle_queue_full(struct se_cmd *cmd,
77 struct se_device *dev);
78 static void transport_free_dev_tasks(struct se_cmd *cmd);
79 static int transport_generic_get_mem(struct se_cmd *cmd);
80 static void transport_put_cmd(struct se_cmd *cmd);
81 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
82 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
83 static void transport_generic_request_failure(struct se_cmd *);
84 static void target_complete_ok_work(struct work_struct *work);
86 int init_se_kmem_caches(void)
88 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
89 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
91 if (!se_tmr_req_cache) {
92 pr_err("kmem_cache_create() for struct se_tmr_req"
96 se_sess_cache = kmem_cache_create("se_sess_cache",
97 sizeof(struct se_session), __alignof__(struct se_session),
100 pr_err("kmem_cache_create() for struct se_session"
102 goto out_free_tmr_req_cache;
104 se_ua_cache = kmem_cache_create("se_ua_cache",
105 sizeof(struct se_ua), __alignof__(struct se_ua),
108 pr_err("kmem_cache_create() for struct se_ua failed\n");
109 goto out_free_sess_cache;
111 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
112 sizeof(struct t10_pr_registration),
113 __alignof__(struct t10_pr_registration), 0, NULL);
114 if (!t10_pr_reg_cache) {
115 pr_err("kmem_cache_create() for struct t10_pr_registration"
117 goto out_free_ua_cache;
119 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
120 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
122 if (!t10_alua_lu_gp_cache) {
123 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
125 goto out_free_pr_reg_cache;
127 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
128 sizeof(struct t10_alua_lu_gp_member),
129 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
130 if (!t10_alua_lu_gp_mem_cache) {
131 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
133 goto out_free_lu_gp_cache;
135 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
136 sizeof(struct t10_alua_tg_pt_gp),
137 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
138 if (!t10_alua_tg_pt_gp_cache) {
139 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
141 goto out_free_lu_gp_mem_cache;
143 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
144 "t10_alua_tg_pt_gp_mem_cache",
145 sizeof(struct t10_alua_tg_pt_gp_member),
146 __alignof__(struct t10_alua_tg_pt_gp_member),
148 if (!t10_alua_tg_pt_gp_mem_cache) {
149 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
151 goto out_free_tg_pt_gp_cache;
154 target_completion_wq = alloc_workqueue("target_completion",
156 if (!target_completion_wq)
157 goto out_free_tg_pt_gp_mem_cache;
161 out_free_tg_pt_gp_mem_cache:
162 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
163 out_free_tg_pt_gp_cache:
164 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
165 out_free_lu_gp_mem_cache:
166 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
167 out_free_lu_gp_cache:
168 kmem_cache_destroy(t10_alua_lu_gp_cache);
169 out_free_pr_reg_cache:
170 kmem_cache_destroy(t10_pr_reg_cache);
172 kmem_cache_destroy(se_ua_cache);
174 kmem_cache_destroy(se_sess_cache);
175 out_free_tmr_req_cache:
176 kmem_cache_destroy(se_tmr_req_cache);
181 void release_se_kmem_caches(void)
183 destroy_workqueue(target_completion_wq);
184 kmem_cache_destroy(se_tmr_req_cache);
185 kmem_cache_destroy(se_sess_cache);
186 kmem_cache_destroy(se_ua_cache);
187 kmem_cache_destroy(t10_pr_reg_cache);
188 kmem_cache_destroy(t10_alua_lu_gp_cache);
189 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
190 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
191 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
194 /* This code ensures unique mib indexes are handed out. */
195 static DEFINE_SPINLOCK(scsi_mib_index_lock);
196 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
199 * Allocate a new row index for the entry type specified
201 u32 scsi_get_new_index(scsi_index_t type)
205 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
207 spin_lock(&scsi_mib_index_lock);
208 new_index = ++scsi_mib_index[type];
209 spin_unlock(&scsi_mib_index_lock);
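/*
 * Illustrative use (transport_add_device_to_core_hba() below does exactly
 * this for the device index):
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 *
 * Each scsi_index_t type keeps its own monotonically increasing counter,
 * protected by scsi_mib_index_lock.
 */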
214 static void transport_init_queue_obj(struct se_queue_obj *qobj)
216 atomic_set(&qobj->queue_cnt, 0);
217 INIT_LIST_HEAD(&qobj->qobj_list);
218 init_waitqueue_head(&qobj->thread_wq);
219 spin_lock_init(&qobj->cmd_queue_lock);
222 void transport_subsystem_check_init(void)
226 if (sub_api_initialized)
229 ret = request_module("target_core_iblock");
231 pr_err("Unable to load target_core_iblock\n");
233 ret = request_module("target_core_file");
235 pr_err("Unable to load target_core_file\n");
237 ret = request_module("target_core_pscsi");
239 pr_err("Unable to load target_core_pscsi\n");
241 ret = request_module("target_core_stgt");
243 pr_err("Unable to load target_core_stgt\n");
245 sub_api_initialized = 1;
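/*
 * Note: the backend subsystem plugins above are loaded on demand the first
 * time this is called; a failed request_module() is only logged, since a
 * given configuration is not required to provide every backend.
 */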
249 struct se_session *transport_init_session(void)
251 struct se_session *se_sess;
253 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
255 pr_err("Unable to allocate struct se_session from"
257 return ERR_PTR(-ENOMEM);
259 INIT_LIST_HEAD(&se_sess->sess_list);
260 INIT_LIST_HEAD(&se_sess->sess_acl_list);
261 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
262 INIT_LIST_HEAD(&se_sess->sess_wait_list);
263 spin_lock_init(&se_sess->sess_cmd_lock);
267 EXPORT_SYMBOL(transport_init_session);
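/*
 * Rough usage sketch for a fabric module (illustrative only; per-fabric
 * details and error handling vary): allocate the session here, then bind
 * it to a portal group and node ACL once the I_T nexus is established:
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
 */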
270 * Called with struct se_portal_group->session_lock held via spin_lock_bh().
272 void __transport_register_session(
273 struct se_portal_group *se_tpg,
274 struct se_node_acl *se_nacl,
275 struct se_session *se_sess,
276 void *fabric_sess_ptr)
278 unsigned char buf[PR_REG_ISID_LEN];
280 se_sess->se_tpg = se_tpg;
281 se_sess->fabric_sess_ptr = fabric_sess_ptr;
283 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session's.
285 * Only set for struct se_session's that will actually be moving I/O.
286 * eg: *NOT* discovery sessions.
290 * If the fabric module supports an ISID based TransportID,
291 * save this value in binary from the fabric I_T Nexus now.
293 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
294 memset(&buf[0], 0, PR_REG_ISID_LEN);
295 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
296 &buf[0], PR_REG_ISID_LEN);
297 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
299 spin_lock_irq(&se_nacl->nacl_sess_lock);
301 * The se_nacl->nacl_sess pointer will be set to the
302 * last active I_T Nexus for each struct se_node_acl.
304 se_nacl->nacl_sess = se_sess;
306 list_add_tail(&se_sess->sess_acl_list,
307 &se_nacl->acl_sess_list);
308 spin_unlock_irq(&se_nacl->nacl_sess_lock);
310 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
312 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
313 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
315 EXPORT_SYMBOL(__transport_register_session);
317 void transport_register_session(
318 struct se_portal_group *se_tpg,
319 struct se_node_acl *se_nacl,
320 struct se_session *se_sess,
321 void *fabric_sess_ptr)
323 spin_lock_bh(&se_tpg->session_lock);
324 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
325 spin_unlock_bh(&se_tpg->session_lock);
327 EXPORT_SYMBOL(transport_register_session);
329 void transport_deregister_session_configfs(struct se_session *se_sess)
331 struct se_node_acl *se_nacl;
334 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
336 se_nacl = se_sess->se_node_acl;
338 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
339 list_del(&se_sess->sess_acl_list);
341 * If the session list is empty, then clear the pointer.
342 * Otherwise, set the struct se_session pointer from the tail
343 * element of the per struct se_node_acl active session list.
345 if (list_empty(&se_nacl->acl_sess_list))
346 se_nacl->nacl_sess = NULL;
348 se_nacl->nacl_sess = container_of(
349 se_nacl->acl_sess_list.prev,
350 struct se_session, sess_acl_list);
352 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
355 EXPORT_SYMBOL(transport_deregister_session_configfs);
357 void transport_free_session(struct se_session *se_sess)
359 kmem_cache_free(se_sess_cache, se_sess);
361 EXPORT_SYMBOL(transport_free_session);
363 void transport_deregister_session(struct se_session *se_sess)
365 struct se_portal_group *se_tpg = se_sess->se_tpg;
366 struct se_node_acl *se_nacl;
370 transport_free_session(se_sess);
374 spin_lock_irqsave(&se_tpg->session_lock, flags);
375 list_del(&se_sess->sess_list);
376 se_sess->se_tpg = NULL;
377 se_sess->fabric_sess_ptr = NULL;
378 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
381 * Determine if we need to do extra work for this initiator node's
382 * struct se_node_acl if it had been previously dynamically generated.
384 se_nacl = se_sess->se_node_acl;
386 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
387 if (se_nacl->dynamic_node_acl) {
388 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
390 list_del(&se_nacl->acl_list);
391 se_tpg->num_node_acls--;
392 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
394 core_tpg_wait_for_nacl_pr_ref(se_nacl);
395 core_free_device_list_for_node(se_nacl, se_tpg);
396 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
398 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
401 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
404 transport_free_session(se_sess);
406 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
407 se_tpg->se_tpg_tfo->get_fabric_name());
409 EXPORT_SYMBOL(transport_deregister_session);
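/*
 * Note: transport_deregister_session() releases the session memory itself
 * via transport_free_session(), so fabric callers must not free it again.
 */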
412 * Called with cmd->t_state_lock held.
414 static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
416 struct se_device *dev = cmd->se_dev;
417 struct se_task *task;
423 list_for_each_entry(task, &cmd->t_task_list, t_list) {
424 if (task->task_flags & TF_ACTIVE)
427 if (!atomic_read(&task->task_state_active))
430 spin_lock_irqsave(&dev->execute_task_lock, flags);
431 list_del(&task->t_state_list);
432 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
433 cmd->se_tfo->get_task_tag(cmd), dev, task);
434 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
436 atomic_set(&task->task_state_active, 0);
437 atomic_dec(&cmd->t_task_cdbs_ex_left);
441 /* transport_cmd_check_stop():
443 * 'transport_off = 1' determines if t_transport_active should be cleared.
444 * 'transport_off = 2' determines if task_dev_state should be removed.
446 * A non-zero u8 t_state sets cmd->t_state.
447 * Returns 1 when command is stopped, else 0.
449 static int transport_cmd_check_stop(
456 spin_lock_irqsave(&cmd->t_state_lock, flags);
458 * Determine if an IOCTL context caller is requesting the stopping of this
459 * command for LUN shutdown purposes.
461 if (atomic_read(&cmd->transport_lun_stop)) {
462 pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
463 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
464 cmd->se_tfo->get_task_tag(cmd));
466 atomic_set(&cmd->t_transport_active, 0);
467 if (transport_off == 2)
468 transport_all_task_dev_remove_state(cmd);
469 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
471 complete(&cmd->transport_lun_stop_comp);
475 * Determine if frontend context caller is requesting the stopping of
476 * this command for frontend exceptions.
478 if (atomic_read(&cmd->t_transport_stop)) {
479 pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
480 " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
481 cmd->se_tfo->get_task_tag(cmd));
483 if (transport_off == 2)
484 transport_all_task_dev_remove_state(cmd);
487 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
490 if (transport_off == 2)
492 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
494 complete(&cmd->t_transport_stop_comp);
498 atomic_set(&cmd->t_transport_active, 0);
499 if (transport_off == 2) {
500 transport_all_task_dev_remove_state(cmd);
502 * Clear struct se_cmd->se_lun before the transport_off == 2
503 * handoff to fabric module.
507 * Some fabric modules like tcm_loop can release
508 * their internally allocated I/O reference now and
511 * Fabric modules are expected to return '1' here if the
512 * se_cmd being passed is released at this point,
513 * or zero if not being released.
515 if (cmd->se_tfo->check_stop_free != NULL) {
516 spin_unlock_irqrestore(
517 &cmd->t_state_lock, flags);
519 return cmd->se_tfo->check_stop_free(cmd);
522 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
526 cmd->t_state = t_state;
527 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
532 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
534 return transport_cmd_check_stop(cmd, 2, 0);
537 static void transport_lun_remove_cmd(struct se_cmd *cmd)
539 struct se_lun *lun = cmd->se_lun;
545 spin_lock_irqsave(&cmd->t_state_lock, flags);
546 if (!atomic_read(&cmd->transport_dev_active)) {
547 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
550 atomic_set(&cmd->transport_dev_active, 0);
551 transport_all_task_dev_remove_state(cmd);
552 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
556 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
557 if (atomic_read(&cmd->transport_lun_active)) {
558 list_del(&cmd->se_lun_node);
559 atomic_set(&cmd->transport_lun_active, 0);
561 pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
562 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
565 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
568 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
570 if (!cmd->se_tmr_req)
571 transport_lun_remove_cmd(cmd);
573 if (transport_cmd_check_stop_to_fabric(cmd))
576 transport_remove_cmd_from_queue(cmd);
577 transport_put_cmd(cmd);
581 static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
584 struct se_device *dev = cmd->se_dev;
585 struct se_queue_obj *qobj = &dev->dev_queue_obj;
589 spin_lock_irqsave(&cmd->t_state_lock, flags);
590 cmd->t_state = t_state;
591 atomic_set(&cmd->t_transport_active, 1);
592 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
595 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
597 /* If the cmd is already on the list, remove it before we add it */
598 if (!list_empty(&cmd->se_queue_node))
599 list_del(&cmd->se_queue_node);
601 atomic_inc(&qobj->queue_cnt);
604 list_add(&cmd->se_queue_node, &qobj->qobj_list);
606 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
607 atomic_set(&cmd->t_transport_queue_active, 1);
608 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
610 wake_up_interruptible(&qobj->thread_wq);
613 static struct se_cmd *
614 transport_get_cmd_from_queue(struct se_queue_obj *qobj)
619 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
620 if (list_empty(&qobj->qobj_list)) {
621 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
624 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
626 atomic_set(&cmd->t_transport_queue_active, 0);
628 list_del_init(&cmd->se_queue_node);
629 atomic_dec(&qobj->queue_cnt);
630 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
635 static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
637 struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
640 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
641 if (!atomic_read(&cmd->t_transport_queue_active)) {
642 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
645 atomic_set(&cmd->t_transport_queue_active, 0);
646 atomic_dec(&qobj->queue_cnt);
647 list_del_init(&cmd->se_queue_node);
648 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
650 if (atomic_read(&cmd->t_transport_queue_active)) {
651 pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
652 cmd->se_tfo->get_task_tag(cmd),
653 atomic_read(&cmd->t_transport_queue_active));
658 * Completion function used by TCM subsystem plugins (such as FILEIO)
659 * for queueing up response from struct se_subsystem_api->do_task()
661 void transport_complete_sync_cache(struct se_cmd *cmd, int good)
663 struct se_task *task = list_entry(cmd->t_task_list.next,
664 struct se_task, t_list);
667 cmd->scsi_status = SAM_STAT_GOOD;
668 task->task_scsi_status = GOOD;
670 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
671 task->task_se_cmd->scsi_sense_reason =
672 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
676 transport_complete_task(task, good);
678 EXPORT_SYMBOL(transport_complete_sync_cache);
680 static void target_complete_failure_work(struct work_struct *work)
682 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
684 transport_generic_request_failure(cmd);
687 /* transport_complete_task():
689 * Called from interrupt and non-interrupt context depending
690 * on the transport plugin.
692 void transport_complete_task(struct se_task *task, int success)
694 struct se_cmd *cmd = task->task_se_cmd;
695 struct se_device *dev = cmd->se_dev;
698 pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
699 cmd->t_task_cdb[0], dev);
702 atomic_inc(&dev->depth_left);
704 spin_lock_irqsave(&cmd->t_state_lock, flags);
705 task->task_flags &= ~TF_ACTIVE;
708 * See if any sense data exists, if so set the TASK_SENSE flag.
709 * Also check for any other post completion work that needs to be
710 * done by the plugins.
712 if (dev && dev->transport->transport_complete) {
713 if (dev->transport->transport_complete(task) != 0) {
714 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
715 task->task_sense = 1;
721 * See if we are waiting for outstanding struct se_task
722 * to complete for an exception condition
724 if (task->task_flags & TF_REQUEST_STOP) {
725 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
726 complete(&task->task_stop_comp);
731 cmd->t_tasks_failed = 1;
734 * Decrement the outstanding t_task_cdbs_left count. The last
735 * struct se_task from struct se_cmd will complete itself into the
736 * device queue depending upon int success.
738 if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
739 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
743 if (cmd->t_tasks_failed) {
744 if (!task->task_error_status) {
745 task->task_error_status =
746 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
747 cmd->scsi_sense_reason =
748 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
751 INIT_WORK(&cmd->work, target_complete_failure_work);
753 atomic_set(&cmd->t_transport_complete, 1);
754 INIT_WORK(&cmd->work, target_complete_ok_work);
757 cmd->t_state = TRANSPORT_COMPLETE;
758 atomic_set(&cmd->t_transport_active, 1);
759 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
761 queue_work(target_completion_wq, &cmd->work);
763 EXPORT_SYMBOL(transport_complete_task);
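/*
 * Backend subsystem plugins call transport_complete_task() (directly, or via
 * transport_complete_sync_cache() above) when a struct se_task finishes; the
 * final task of a command queues either target_complete_ok_work() or
 * target_complete_failure_work() onto target_completion_wq.
 */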
766 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
767 * struct se_task list is ready to be added to the active execution list.
770 * Called with se_dev_t->execute_task_lock held.
772 static inline int transport_add_task_check_sam_attr(
773 struct se_task *task,
774 struct se_task *task_prev,
775 struct se_device *dev)
778 * No SAM Task attribute emulation enabled, add to tail of
781 if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
782 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
786 * HEAD_OF_QUEUE attribute for received CDB, which means
787 * the first task that is associated with a struct se_cmd goes to
788 * head of the struct se_device->execute_task_list, and task_prev
789 * after that for each subsequent task
791 if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
792 list_add(&task->t_execute_list,
793 (task_prev != NULL) ?
794 &task_prev->t_execute_list :
795 &dev->execute_task_list);
797 pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
798 " in execution queue\n",
799 task->task_se_cmd->t_task_cdb[0]);
803 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
804 * transitioned from Dormant -> Active state, and are added to the end
805 * of the struct se_device->execute_task_list
807 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
811 /* __transport_add_task_to_execute_queue():
813 * Called with se_dev_t->execute_task_lock held.
815 static void __transport_add_task_to_execute_queue(
816 struct se_task *task,
817 struct se_task *task_prev,
818 struct se_device *dev)
822 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
823 atomic_inc(&dev->execute_tasks);
825 if (atomic_read(&task->task_state_active))
828 * Determine if this task needs to go to HEAD_OF_QUEUE for the
829 * state list as well. Running with SAM Task Attribute emulation
830 * will always return head_of_queue == 0 here
833 list_add(&task->t_state_list, (task_prev) ?
834 &task_prev->t_state_list :
835 &dev->state_task_list);
837 list_add_tail(&task->t_state_list, &dev->state_task_list);
839 atomic_set(&task->task_state_active, 1);
841 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
842 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
846 static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
848 struct se_device *dev = cmd->se_dev;
849 struct se_task *task;
852 spin_lock_irqsave(&cmd->t_state_lock, flags);
853 list_for_each_entry(task, &cmd->t_task_list, t_list) {
854 if (atomic_read(&task->task_state_active))
857 spin_lock(&dev->execute_task_lock);
858 list_add_tail(&task->t_state_list, &dev->state_task_list);
859 atomic_set(&task->task_state_active, 1);
861 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
862 task->task_se_cmd->se_tfo->get_task_tag(
863 task->task_se_cmd), task, dev);
865 spin_unlock(&dev->execute_task_lock);
867 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
870 static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
872 struct se_device *dev = cmd->se_dev;
873 struct se_task *task, *task_prev = NULL;
876 spin_lock_irqsave(&dev->execute_task_lock, flags);
877 list_for_each_entry(task, &cmd->t_task_list, t_list) {
878 if (!list_empty(&task->t_execute_list))
881 * __transport_add_task_to_execute_queue() handles the
882 * SAM Task Attribute emulation if enabled
884 __transport_add_task_to_execute_queue(task, task_prev, dev);
887 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
890 void __transport_remove_task_from_execute_queue(struct se_task *task,
891 struct se_device *dev)
893 list_del_init(&task->t_execute_list);
894 atomic_dec(&dev->execute_tasks);
897 static void transport_remove_task_from_execute_queue(
898 struct se_task *task,
899 struct se_device *dev)
903 if (WARN_ON(list_empty(&task->t_execute_list)))
906 spin_lock_irqsave(&dev->execute_task_lock, flags);
907 __transport_remove_task_from_execute_queue(task, dev);
908 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
912 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
915 static void target_qf_do_work(struct work_struct *work)
917 struct se_device *dev = container_of(work, struct se_device,
919 LIST_HEAD(qf_cmd_list);
920 struct se_cmd *cmd, *cmd_tmp;
922 spin_lock_irq(&dev->qf_cmd_lock);
923 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
924 spin_unlock_irq(&dev->qf_cmd_lock);
926 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
927 list_del(&cmd->se_qf_node);
928 atomic_dec(&dev->dev_qf_count);
929 smp_mb__after_atomic_dec();
931 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
932 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
933 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
934 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
937 transport_add_cmd_to_queue(cmd, cmd->t_state, true);
941 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
943 switch (cmd->data_direction) {
946 case DMA_FROM_DEVICE:
950 case DMA_BIDIRECTIONAL:
959 void transport_dump_dev_state(
960 struct se_device *dev,
964 *bl += sprintf(b + *bl, "Status: ");
965 switch (dev->dev_status) {
966 case TRANSPORT_DEVICE_ACTIVATED:
967 *bl += sprintf(b + *bl, "ACTIVATED");
969 case TRANSPORT_DEVICE_DEACTIVATED:
970 *bl += sprintf(b + *bl, "DEACTIVATED");
972 case TRANSPORT_DEVICE_SHUTDOWN:
973 *bl += sprintf(b + *bl, "SHUTDOWN");
975 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
976 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
977 *bl += sprintf(b + *bl, "OFFLINE");
980 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
984 *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
985 atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
987 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
988 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
989 *bl += sprintf(b + *bl, " ");
992 void transport_dump_vpd_proto_id(
994 unsigned char *p_buf,
997 unsigned char buf[VPD_TMP_BUF_SIZE];
1000 memset(buf, 0, VPD_TMP_BUF_SIZE);
1001 len = sprintf(buf, "T10 VPD Protocol Identifier: ");
1003 switch (vpd->protocol_identifier) {
1005 sprintf(buf+len, "Fibre Channel\n");
1008 sprintf(buf+len, "Parallel SCSI\n");
1011 sprintf(buf+len, "SSA\n");
1014 sprintf(buf+len, "IEEE 1394\n");
1017 sprintf(buf+len, "SCSI Remote Direct Memory Access"
1021 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1024 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1027 sprintf(buf+len, "Automation/Drive Interface Transport"
1031 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1034 sprintf(buf+len, "Unknown 0x%02x\n",
1035 vpd->protocol_identifier);
1040 strncpy(p_buf, buf, p_buf_len);
1042 pr_debug("%s", buf);
1046 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1049 * Check if the Protocol Identifier Valid (PIV) bit is set..
1051 * from spc3r23.pdf section 7.5.1
1053 if (page_83[1] & 0x80) {
1054 vpd->protocol_identifier = (page_83[0] & 0xf0);
1055 vpd->protocol_identifier_set = 1;
1056 transport_dump_vpd_proto_id(vpd, NULL, 0);
1059 EXPORT_SYMBOL(transport_set_vpd_proto_id);
1061 int transport_dump_vpd_assoc(
1062 struct t10_vpd *vpd,
1063 unsigned char *p_buf,
1066 unsigned char buf[VPD_TMP_BUF_SIZE];
1070 memset(buf, 0, VPD_TMP_BUF_SIZE);
1071 len = sprintf(buf, "T10 VPD Identifier Association: ");
1073 switch (vpd->association) {
1075 sprintf(buf+len, "addressed logical unit\n");
1078 sprintf(buf+len, "target port\n");
1081 sprintf(buf+len, "SCSI target device\n");
1084 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1090 strncpy(p_buf, buf, p_buf_len);
1092 pr_debug("%s", buf);
1097 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1100 * The VPD identification association..
1102 * from spc3r23.pdf Section 7.6.3.1 Table 297
1104 vpd->association = (page_83[1] & 0x30);
1105 return transport_dump_vpd_assoc(vpd, NULL, 0);
1107 EXPORT_SYMBOL(transport_set_vpd_assoc);
1109 int transport_dump_vpd_ident_type(
1110 struct t10_vpd *vpd,
1111 unsigned char *p_buf,
1114 unsigned char buf[VPD_TMP_BUF_SIZE];
1118 memset(buf, 0, VPD_TMP_BUF_SIZE);
1119 len = sprintf(buf, "T10 VPD Identifier Type: ");
1121 switch (vpd->device_identifier_type) {
1123 sprintf(buf+len, "Vendor specific\n");
1126 sprintf(buf+len, "T10 Vendor ID based\n");
1129 sprintf(buf+len, "EUI-64 based\n");
1132 sprintf(buf+len, "NAA\n");
1135 sprintf(buf+len, "Relative target port identifier\n");
1138 sprintf(buf+len, "SCSI name string\n");
1141 sprintf(buf+len, "Unsupported: 0x%02x\n",
1142 vpd->device_identifier_type);
1148 if (p_buf_len < strlen(buf)+1)
1150 strncpy(p_buf, buf, p_buf_len);
1152 pr_debug("%s", buf);
1158 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1161 * The VPD identifier type..
1163 * from spc3r23.pdf Section 7.6.3.1 Table 298
1165 vpd->device_identifier_type = (page_83[1] & 0x0f);
1166 return transport_dump_vpd_ident_type(vpd, NULL, 0);
1168 EXPORT_SYMBOL(transport_set_vpd_ident_type);
1170 int transport_dump_vpd_ident(
1171 struct t10_vpd *vpd,
1172 unsigned char *p_buf,
1175 unsigned char buf[VPD_TMP_BUF_SIZE];
1178 memset(buf, 0, VPD_TMP_BUF_SIZE);
1180 switch (vpd->device_identifier_code_set) {
1181 case 0x01: /* Binary */
1182 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1183 &vpd->device_identifier[0]);
1185 case 0x02: /* ASCII */
1186 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1187 &vpd->device_identifier[0]);
1189 case 0x03: /* UTF-8 */
1190 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1191 &vpd->device_identifier[0]);
1194 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1195 " 0x%02x", vpd->device_identifier_code_set);
1201 strncpy(p_buf, buf, p_buf_len);
1203 pr_debug("%s", buf);
1209 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1211 static const char hex_str[] = "0123456789abcdef";
1212 int j = 0, i = 4; /* offset to start of the identifier */
1215 * The VPD Code Set (encoding)
1217 * from spc3r23.pdf Section 7.6.3.1 Table 296
1219 vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1220 switch (vpd->device_identifier_code_set) {
1221 case 0x01: /* Binary */
1222 vpd->device_identifier[j++] =
1223 hex_str[vpd->device_identifier_type];
1224 while (i < (4 + page_83[3])) {
1225 vpd->device_identifier[j++] =
1226 hex_str[(page_83[i] & 0xf0) >> 4];
1227 vpd->device_identifier[j++] =
1228 hex_str[page_83[i] & 0x0f];
1232 case 0x02: /* ASCII */
1233 case 0x03: /* UTF-8 */
1234 while (i < (4 + page_83[3]))
1235 vpd->device_identifier[j++] = page_83[i++];
1241 return transport_dump_vpd_ident(vpd, NULL, 0);
1243 EXPORT_SYMBOL(transport_set_vpd_ident);
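/*
 * Worked example (illustrative): for a binary (code set 0x01) designator,
 * each identifier byte from page_83[4..] is expanded into two hex characters
 * via hex_str[], so a leading byte of 0x60 is stored as "60" in
 * vpd->device_identifier; ASCII and UTF-8 designators are copied through
 * unchanged.
 */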
1245 static void core_setup_task_attr_emulation(struct se_device *dev)
1248 * If this device is from Target_Core_Mod/pSCSI, disable the
1249 * SAM Task Attribute emulation.
1251 * This is currently not available in upstream Linux/SCSI Target
1252 * mode code, and is assumed to be disabled while using TCM/pSCSI.
1254 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1255 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1259 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1260 pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1261 " device\n", dev->transport->name,
1262 dev->transport->get_device_rev(dev));
1265 static void scsi_dump_inquiry(struct se_device *dev)
1267 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
1270 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1272 pr_debug(" Vendor: ");
1273 for (i = 0; i < 8; i++)
1274 if (wwn->vendor[i] >= 0x20)
1275 pr_debug("%c", wwn->vendor[i]);
1279 pr_debug(" Model: ");
1280 for (i = 0; i < 16; i++)
1281 if (wwn->model[i] >= 0x20)
1282 pr_debug("%c", wwn->model[i]);
1286 pr_debug(" Revision: ");
1287 for (i = 0; i < 4; i++)
1288 if (wwn->revision[i] >= 0x20)
1289 pr_debug("%c", wwn->revision[i]);
1295 device_type = dev->transport->get_device_type(dev);
1296 pr_debug(" Type: %s ", scsi_device_type(device_type));
1297 pr_debug(" ANSI SCSI revision: %02x\n",
1298 dev->transport->get_device_rev(dev));
1301 struct se_device *transport_add_device_to_core_hba(
1303 struct se_subsystem_api *transport,
1304 struct se_subsystem_dev *se_dev,
1306 void *transport_dev,
1307 struct se_dev_limits *dev_limits,
1308 const char *inquiry_prod,
1309 const char *inquiry_rev)
1312 struct se_device *dev;
1314 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1316 pr_err("Unable to allocate memory for se_dev_t\n");
1320 transport_init_queue_obj(&dev->dev_queue_obj);
1321 dev->dev_flags = device_flags;
1322 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
1323 dev->dev_ptr = transport_dev;
1325 dev->se_sub_dev = se_dev;
1326 dev->transport = transport;
1327 INIT_LIST_HEAD(&dev->dev_list);
1328 INIT_LIST_HEAD(&dev->dev_sep_list);
1329 INIT_LIST_HEAD(&dev->dev_tmr_list);
1330 INIT_LIST_HEAD(&dev->execute_task_list);
1331 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1332 INIT_LIST_HEAD(&dev->state_task_list);
1333 INIT_LIST_HEAD(&dev->qf_cmd_list);
1334 spin_lock_init(&dev->execute_task_lock);
1335 spin_lock_init(&dev->delayed_cmd_lock);
1336 spin_lock_init(&dev->dev_reservation_lock);
1337 spin_lock_init(&dev->dev_status_lock);
1338 spin_lock_init(&dev->se_port_lock);
1339 spin_lock_init(&dev->se_tmr_lock);
1340 spin_lock_init(&dev->qf_cmd_lock);
1342 dev->queue_depth = dev_limits->queue_depth;
1343 atomic_set(&dev->depth_left, dev->queue_depth);
1344 atomic_set(&dev->dev_ordered_id, 0);
1346 se_dev_set_default_attribs(dev, dev_limits);
1348 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1349 dev->creation_time = get_jiffies_64();
1350 spin_lock_init(&dev->stats_lock);
1352 spin_lock(&hba->device_lock);
1353 list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1355 spin_unlock(&hba->device_lock);
1357 * Setup the SAM Task Attribute emulation for struct se_device
1359 core_setup_task_attr_emulation(dev);
1361 * Force PR and ALUA passthrough emulation with internal object use.
1363 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1365 * Setup the Reservations infrastructure for struct se_device
1367 core_setup_reservations(dev, force_pt);
1369 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1371 if (core_setup_alua(dev, force_pt) < 0)
1375 * Startup the struct se_device processing thread
1377 dev->process_thread = kthread_run(transport_processing_thread, dev,
1378 "LIO_%s", dev->transport->name);
1379 if (IS_ERR(dev->process_thread)) {
1380 pr_err("Unable to create kthread: LIO_%s\n",
1381 dev->transport->name);
1385 * Setup work_queue for QUEUE_FULL
1387 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1389 * Preload the initial INQUIRY const values if we are doing
1390 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1391 * passthrough because this is being provided by the backend LLD.
1392 * This is required so that transport_get_inquiry() copies these
1393 * originals once back into DEV_T10_WWN(dev) for the virtual device
1396 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1397 if (!inquiry_prod || !inquiry_rev) {
1398 pr_err("All non TCM/pSCSI plugins require"
1399 " INQUIRY consts\n");
1403 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1404 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1405 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
1407 scsi_dump_inquiry(dev);
1411 kthread_stop(dev->process_thread);
1413 spin_lock(&hba->device_lock);
1414 list_del(&dev->dev_list);
1416 spin_unlock(&hba->device_lock);
1418 se_release_vpd_for_dev(dev);
1424 EXPORT_SYMBOL(transport_add_device_to_core_hba);
1426 /* transport_generic_prepare_cdb():
1428 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1429 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1430 * The point of this is since we are mapping iSCSI LUNs to
1431 * SCSI Target IDs, having a non-zero LUN in the CDB will throw the
1432 * devices and HBAs for a loop.
1434 static inline void transport_generic_prepare_cdb(
1438 case READ_10: /* SBC - RDProtect */
1439 case READ_12: /* SBC - RDProtect */
1440 case READ_16: /* SBC - RDProtect */
1441 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1442 case VERIFY: /* SBC - VRProtect */
1443 case VERIFY_16: /* SBC - VRProtect */
1444 case WRITE_VERIFY: /* SBC - VRProtect */
1445 case WRITE_VERIFY_12: /* SBC - VRProtect */
1448 cdb[1] &= 0x1f; /* clear logical unit number */
1453 static struct se_task *
1454 transport_generic_get_task(struct se_cmd *cmd,
1455 enum dma_data_direction data_direction)
1457 struct se_task *task;
1458 struct se_device *dev = cmd->se_dev;
1460 task = dev->transport->alloc_task(cmd->t_task_cdb);
1462 pr_err("Unable to allocate struct se_task\n");
1466 INIT_LIST_HEAD(&task->t_list);
1467 INIT_LIST_HEAD(&task->t_execute_list);
1468 INIT_LIST_HEAD(&task->t_state_list);
1469 init_completion(&task->task_stop_comp);
1470 task->task_se_cmd = cmd;
1471 task->task_data_direction = data_direction;
1476 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1479 * Used by fabric modules containing a local struct se_cmd within their
1480 * fabric dependent per I/O descriptor.
1482 void transport_init_se_cmd(
1484 struct target_core_fabric_ops *tfo,
1485 struct se_session *se_sess,
1489 unsigned char *sense_buffer)
1491 INIT_LIST_HEAD(&cmd->se_lun_node);
1492 INIT_LIST_HEAD(&cmd->se_delayed_node);
1493 INIT_LIST_HEAD(&cmd->se_qf_node);
1494 INIT_LIST_HEAD(&cmd->se_queue_node);
1495 INIT_LIST_HEAD(&cmd->se_cmd_list);
1496 INIT_LIST_HEAD(&cmd->t_task_list);
1497 init_completion(&cmd->transport_lun_fe_stop_comp);
1498 init_completion(&cmd->transport_lun_stop_comp);
1499 init_completion(&cmd->t_transport_stop_comp);
1500 init_completion(&cmd->cmd_wait_comp);
1501 spin_lock_init(&cmd->t_state_lock);
1502 atomic_set(&cmd->transport_dev_active, 1);
1505 cmd->se_sess = se_sess;
1506 cmd->data_length = data_length;
1507 cmd->data_direction = data_direction;
1508 cmd->sam_task_attr = task_attr;
1509 cmd->sense_buffer = sense_buffer;
1511 EXPORT_SYMBOL(transport_init_se_cmd);
1513 static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1516 * Check if SAM Task Attribute emulation is enabled for this
1517 * struct se_device storage object
1519 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1522 if (cmd->sam_task_attr == MSG_ACA_TAG) {
1523 pr_debug("SAM Task Attribute ACA"
1524 " emulation is not supported\n");
1528 * Used to determine when ORDERED commands should go from
1529 * Dormant to Active status.
1531 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1532 smp_mb__after_atomic_inc();
1533 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1534 cmd->se_ordered_id, cmd->sam_task_attr,
1535 cmd->se_dev->transport->name);
1539 /* transport_generic_allocate_tasks():
1541 * Called from fabric RX Thread.
1543 int transport_generic_allocate_tasks(
1549 transport_generic_prepare_cdb(cdb);
1551 * Ensure that the received CDB is less than the max (252 + 8) bytes
1552 * for VARIABLE_LENGTH_CMD
1554 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1555 pr_err("Received SCSI CDB with command_size: %d that"
1556 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1557 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1558 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1559 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1563 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1564 * allocate the additional extended CDB buffer now.. Otherwise
1565 * setup the pointer from __t_task_cdb to t_task_cdb.
1567 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1568 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1570 if (!cmd->t_task_cdb) {
1571 pr_err("Unable to allocate cmd->t_task_cdb"
1572 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1573 scsi_command_size(cdb),
1574 (unsigned long)sizeof(cmd->__t_task_cdb));
1575 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1576 cmd->scsi_sense_reason =
1577 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1581 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1583 * Copy the original CDB into cmd->t_task_cdb.
1585 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1587 * Setup the received CDB based on SCSI defined opcodes and
1588 * perform unit attention, persistent reservations and ALUA
1589 * checks for virtual device backends. The cmd->t_task_cdb
1590 * pointer is expected to be setup before we reach this point.
1592 ret = transport_generic_cmd_sequencer(cmd, cdb);
1596 * Check for SAM Task Attribute Emulation
1598 if (transport_check_alloc_task_attr(cmd) < 0) {
1599 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1600 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1603 spin_lock(&cmd->se_lun->lun_sep_lock);
1604 if (cmd->se_lun->lun_sep)
1605 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1606 spin_unlock(&cmd->se_lun->lun_sep_lock);
1609 EXPORT_SYMBOL(transport_generic_allocate_tasks);
1612 * Used by fabric module frontends to queue tasks directly.
1613 * May only be used from process context.
1615 int transport_handle_cdb_direct(
1622 pr_err("cmd->se_lun is NULL\n");
1625 if (in_interrupt()) {
1627 pr_err("transport_generic_handle_cdb cannot be called"
1628 " from interrupt context\n");
1632 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
1633 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1634 * in existing usage to ensure that outstanding descriptors are handled
1635 * correctly during shutdown via transport_wait_for_tasks()
1637 * Also, we don't take cmd->t_state_lock here as we only expect
1638 * this to be called for initial descriptor submission.
1640 cmd->t_state = TRANSPORT_NEW_CMD;
1641 atomic_set(&cmd->t_transport_active, 1);
1643 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1644 * so follow TRANSPORT_NEW_CMD processing thread context usage
1645 * and call transport_generic_request_failure() if necessary..
1647 ret = transport_generic_new_cmd(cmd);
1649 transport_generic_request_failure(cmd);
1653 EXPORT_SYMBOL(transport_handle_cdb_direct);
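/*
 * Rough submission sequence for a fabric module using this interface (a
 * sketch only; LUN lookup and fabric-specific setup are omitted):
 *
 *	transport_init_se_cmd(cmd, tfo, se_sess, data_length, dir, attr, sense);
 *	transport_generic_allocate_tasks(cmd, cdb);
 *	transport_handle_cdb_direct(cmd);
 */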
1656 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1657 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1658 * complete setup in TCM process context w/ TFO->new_cmd_map().
1660 int transport_generic_handle_cdb_map(
1665 pr_err("cmd->se_lun is NULL\n");
1669 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
1672 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1674 /* transport_generic_handle_data():
1678 int transport_generic_handle_data(
1682 * For the software fabric case, we assume the nexus is being
1683 * failed/shutdown when signals are pending from the kthread context
1684 * caller, so we return a failure. For the HW target mode case running
1685 * in interrupt code, the signal_pending() check is skipped.
1687 if (!in_interrupt() && signal_pending(current))
1690 * If the received CDB has already been ABORTED by the generic
1691 * target engine, we now call transport_check_aborted_status()
1692 * to queue any delayed TASK_ABORTED status for the received CDB to the
1693 * fabric module as we are expecting no further incoming DATA OUT
1694 * sequences at this point.
1696 if (transport_check_aborted_status(cmd, 1) != 0)
1699 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
1702 EXPORT_SYMBOL(transport_generic_handle_data);
1704 /* transport_generic_handle_tmr():
1708 int transport_generic_handle_tmr(
1711 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
1714 EXPORT_SYMBOL(transport_generic_handle_tmr);
1717 * If the task is active, request it to be stopped and sleep until it
1720 bool target_stop_task(struct se_task *task, unsigned long *flags)
1722 struct se_cmd *cmd = task->task_se_cmd;
1723 bool was_active = false;
1725 if (task->task_flags & TF_ACTIVE) {
1726 task->task_flags |= TF_REQUEST_STOP;
1727 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1729 pr_debug("Task %p waiting to complete\n", task);
1730 wait_for_completion(&task->task_stop_comp);
1731 pr_debug("Task %p stopped successfully\n", task);
1733 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1734 atomic_dec(&cmd->t_task_cdbs_left);
1735 task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
1742 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1744 struct se_task *task, *task_tmp;
1745 unsigned long flags;
1748 pr_debug("ITT[0x%08x] - Stopping tasks\n",
1749 cmd->se_tfo->get_task_tag(cmd));
1752 * No tasks remain in the execution queue
1754 spin_lock_irqsave(&cmd->t_state_lock, flags);
1755 list_for_each_entry_safe(task, task_tmp,
1756 &cmd->t_task_list, t_list) {
1757 pr_debug("Processing task %p\n", task);
1759 * If the struct se_task has not been sent and is not active,
1760 * remove the struct se_task from the execution queue.
1762 if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
1763 spin_unlock_irqrestore(&cmd->t_state_lock,
1765 transport_remove_task_from_execute_queue(task,
1768 pr_debug("Task %p removed from execute queue\n", task);
1769 spin_lock_irqsave(&cmd->t_state_lock, flags);
1773 if (!target_stop_task(task, &flags)) {
1774 pr_debug("Task %p - did nothing\n", task);
1778 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1784 * Handle SAM-esque emulation for generic transport request failures.
1786 static void transport_generic_request_failure(struct se_cmd *cmd)
1790 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1791 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1792 cmd->t_task_cdb[0]);
1793 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
1794 cmd->se_tfo->get_cmd_state(cmd),
1795 cmd->t_state, cmd->scsi_sense_reason);
1796 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
1797 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1798 " t_transport_active: %d t_transport_stop: %d"
1799 " t_transport_sent: %d\n", cmd->t_task_list_num,
1800 atomic_read(&cmd->t_task_cdbs_left),
1801 atomic_read(&cmd->t_task_cdbs_sent),
1802 atomic_read(&cmd->t_task_cdbs_ex_left),
1803 atomic_read(&cmd->t_transport_active),
1804 atomic_read(&cmd->t_transport_stop),
1805 atomic_read(&cmd->t_transport_sent));
1808 * For SAM Task Attribute emulation for failed struct se_cmd
1810 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1811 transport_complete_task_attr(cmd);
1813 switch (cmd->scsi_sense_reason) {
1814 case TCM_NON_EXISTENT_LUN:
1815 case TCM_UNSUPPORTED_SCSI_OPCODE:
1816 case TCM_INVALID_CDB_FIELD:
1817 case TCM_INVALID_PARAMETER_LIST:
1818 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1819 case TCM_UNKNOWN_MODE_PAGE:
1820 case TCM_WRITE_PROTECTED:
1821 case TCM_CHECK_CONDITION_ABORT_CMD:
1822 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1823 case TCM_CHECK_CONDITION_NOT_READY:
1825 case TCM_RESERVATION_CONFLICT:
1827 * No SENSE Data payload for this case, set SCSI Status
1828 * and queue the response to $FABRIC_MOD.
1830 * Uses linux/include/scsi/scsi.h SAM status codes defs
1832 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1834 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1835 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1838 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1841 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
1842 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
1843 cmd->orig_fe_lun, 0x2C,
1844 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1846 ret = cmd->se_tfo->queue_status(cmd);
1847 if (ret == -EAGAIN || ret == -ENOMEM)
1851 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1852 cmd->t_task_cdb[0], cmd->scsi_sense_reason);
1853 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1857 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
1858 * make the call to transport_send_check_condition_and_sense()
1859 * directly. Otherwise expect the fabric to make the call to
1860 * transport_send_check_condition_and_sense() after handling
1861 * possible unsolicited write data payloads.
1863 ret = transport_send_check_condition_and_sense(cmd,
1864 cmd->scsi_sense_reason, 0);
1865 if (ret == -EAGAIN || ret == -ENOMEM)
1869 transport_lun_remove_cmd(cmd);
1870 if (!transport_cmd_check_stop_to_fabric(cmd))
1875 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1876 transport_handle_queue_full(cmd, cmd->se_dev);
1879 static inline u32 transport_lba_21(unsigned char *cdb)
1881 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
1884 static inline u32 transport_lba_32(unsigned char *cdb)
1886 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1889 static inline unsigned long long transport_lba_64(unsigned char *cdb)
1891 unsigned int __v1, __v2;
1893 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1894 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1896 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
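/*
 * These helpers assemble the big-endian LBA field directly from the CDB;
 * e.g. transport_lba_64() over a READ_16/WRITE_16 style CDB is equivalent
 * to get_unaligned_be64(&cdb[2]).
 */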
1900 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
1902 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
1904 unsigned int __v1, __v2;
1906 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
1907 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
1909 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1912 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1914 unsigned long flags;
1916 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1917 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1918 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1921 static inline int transport_tcq_window_closed(struct se_device *dev)
1923 if (dev->dev_tcq_window_closed++ <
1924 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
1925 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
1927 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
1929 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
1934 * Called from Fabric Module context from transport_execute_tasks()
1936 * The return of this function determines if the tasks from struct se_cmd
1937 * get added to the execution queue in transport_execute_tasks(),
1938 * or are added to the delayed or ordered lists here.
1940 static inline int transport_execute_task_attr(struct se_cmd *cmd)
1942 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1945 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
1946 * to add the passed struct se_cmd's list of tasks to the front of the list.
1948 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
1949 pr_debug("Added HEAD_OF_QUEUE for CDB:"
1950 " 0x%02x, se_ordered_id: %u\n",
1952 cmd->se_ordered_id);
1954 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
1955 atomic_inc(&cmd->se_dev->dev_ordered_sync);
1956 smp_mb__after_atomic_inc();
1958 pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
1959 " list, se_ordered_id: %u\n",
1961 cmd->se_ordered_id);
1963 * Add ORDERED command to tail of execution queue if
1964 * no other older commands exist that need to be
1967 if (!atomic_read(&cmd->se_dev->simple_cmds))
1971 * For SIMPLE and UNTAGGED Task Attribute commands
1973 atomic_inc(&cmd->se_dev->simple_cmds);
1974 smp_mb__after_atomic_inc();
1977 * If one or more outstanding ORDERED task attributes exist, the dormant
1978 * task(s) built for the passed struct se_cmd cannot yet be added to the
1979 * execution queue and made Active for this struct se_device.
1981 if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
1983 * Otherwise, add cmd w/ tasks to delayed cmd queue that
1984 * will be drained upon completion of HEAD_OF_QUEUE task.
1986 spin_lock(&cmd->se_dev->delayed_cmd_lock);
1987 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
1988 list_add_tail(&cmd->se_delayed_node,
1989 &cmd->se_dev->delayed_cmd_list);
1990 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
1992 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
1993 " delayed CMD list, se_ordered_id: %u\n",
1994 cmd->t_task_cdb[0], cmd->sam_task_attr,
1995 cmd->se_ordered_id);
1997 * Return zero to let transport_execute_tasks() know
1998 * not to add the delayed tasks to the execution list.
2003 * Otherwise, no ORDERED task attributes exist..
2009 * Called from fabric module context in transport_generic_new_cmd() and
2010 * transport_generic_process_write()
2012 static int transport_execute_tasks(struct se_cmd *cmd)
2016 if (se_dev_check_online(cmd->se_dev) != 0) {
2017 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2018 transport_generic_request_failure(cmd);
2023 * Call transport_cmd_check_stop() to see if a fabric exception
2024 * has occurred that prevents execution.
2026 if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
2028 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2029 * attribute for the tasks of the received struct se_cmd CDB
2031 add_tasks = transport_execute_task_attr(cmd);
2035 * This calls transport_add_tasks_from_cmd() to handle
2036 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
2037 * (if enabled) in __transport_add_task_to_execute_queue() and
2038 * transport_add_task_check_sam_attr().
2040 transport_add_tasks_from_cmd(cmd);
2043 * Kick the execution queue for the cmd associated struct se_device
2047 __transport_execute_tasks(cmd->se_dev);
2052 * Called to check the struct se_device tcq depth window, and once open,
2053 * pull struct se_task entries from struct se_device->execute_task_list and
2053 * hand them to the backend.
2055 * Called from transport_processing_thread()
2057 static int __transport_execute_tasks(struct se_device *dev)
2060 struct se_cmd *cmd = NULL;
2061 struct se_task *task = NULL;
2062 unsigned long flags;
2065 * Check if there is enough room in the device and HBA queue to send
2066 * struct se_tasks to the selected transport.
2069 if (!atomic_read(&dev->depth_left))
2070 return transport_tcq_window_closed(dev);
2072 dev->dev_tcq_window_closed = 0;
2074 spin_lock_irq(&dev->execute_task_lock);
2075 if (list_empty(&dev->execute_task_list)) {
2076 spin_unlock_irq(&dev->execute_task_lock);
2079 task = list_first_entry(&dev->execute_task_list,
2080 struct se_task, t_execute_list);
2081 __transport_remove_task_from_execute_queue(task, dev);
2082 spin_unlock_irq(&dev->execute_task_lock);
2084 atomic_dec(&dev->depth_left);
2086 cmd = task->task_se_cmd;
2088 spin_lock_irqsave(&cmd->t_state_lock, flags);
2089 task->task_flags |= (TF_ACTIVE | TF_SENT);
2090 atomic_inc(&cmd->t_task_cdbs_sent);
2092 if (atomic_read(&cmd->t_task_cdbs_sent) ==
2093 cmd->t_task_list_num)
2094 atomic_set(&cmd->t_transport_sent, 1);
2096 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2098 if (cmd->execute_task)
2099 error = cmd->execute_task(task);
2101 error = dev->transport->do_task(task);
2103 spin_lock_irqsave(&cmd->t_state_lock, flags);
2104 task->task_flags &= ~TF_ACTIVE;
2105 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2106 atomic_set(&cmd->t_transport_sent, 0);
2107 transport_stop_tasks_for_cmd(cmd);
2108 atomic_inc(&dev->depth_left);
2109 transport_generic_request_failure(cmd);
2117 static inline u32 transport_get_sectors_6(
2122 struct se_device *dev = cmd->se_dev;
2125 * Assume TYPE_DISK for non struct se_device objects.
2126 * Use 8-bit sector value.
2132 * Use 24-bit allocation length for TYPE_TAPE.
2134 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2135 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2138 * Everything else assume TYPE_DISK Sector CDB location.
2139 * Use 8-bit sector value. SBC-3 says:
2141 * A TRANSFER LENGTH field set to zero specifies that 256
2142 * logical blocks shall be written. Any other value
2143 * specifies the number of logical blocks that shall be
2147 return cdb[4] ? : 256;
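/*
 * Per the SBC-3 wording above: cdb[4] == 0 means 256 logical blocks, any
 * other value is used directly (e.g. cdb[4] == 0x08 -> 8 blocks).
 */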
2150 static inline u32 transport_get_sectors_10(
2155 struct se_device *dev = cmd->se_dev;
2158 * Assume TYPE_DISK for non struct se_device objects.
2159 * Use 16-bit sector value.
2165 * XXX_10 is not defined in SSC, throw an exception
2167 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2173 * Everything else assume TYPE_DISK Sector CDB location.
2174 * Use 16-bit sector value.
2177 return (u32)(cdb[7] << 8) + cdb[8];
2180 static inline u32 transport_get_sectors_12(
2185 struct se_device *dev = cmd->se_dev;
2188 * Assume TYPE_DISK for non struct se_device objects.
2189 * Use 32-bit sector value.
2195 * XXX_12 is not defined in SSC, throw an exception
2197 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2203 * Everything else assume TYPE_DISK Sector CDB location.
2204 * Use 32-bit sector value.
2207 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2210 static inline u32 transport_get_sectors_16(
2215 struct se_device *dev = cmd->se_dev;
2218 * Assume TYPE_DISK for non struct se_device objects.
2219 * Use 32-bit sector value.
2225 * Use 24-bit allocation length for TYPE_TAPE.
2227 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2228 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2231 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2232 (cdb[12] << 8) + cdb[13];
2236 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2238 static inline u32 transport_get_sectors_32(
2244 * Assume TYPE_DISK for non struct se_device objects.
2245 * Use 32-bit sector value.
2247 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2248 (cdb[30] << 8) + cdb[31];
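/*
 * Editorial note (not part of the original file): the open-coded shift/add
 * expressions in the transport_get_sectors_*() helpers above simply decode
 * big-endian CDB fields, i.e. they are equivalent to the get_unaligned_be*()
 * helpers already used elsewhere in this file, for example:
 *
 *	sectors = get_unaligned_be16(&cdb[7]);		READ_10 / WRITE_10
 *	sectors = get_unaligned_be32(&cdb[28]);		READ_32 / WRITE_32
 */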
2252 static inline u32 transport_get_size(
2257 struct se_device *dev = cmd->se_dev;
2259 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2260 if (cdb[1] & 1) { /* sectors */
2261 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2266 pr_debug("Returning block_size: %u, sectors: %u == %u for"
2267 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
2268 dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2269 dev->transport->name);
2271 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2274 static void transport_xor_callback(struct se_cmd *cmd)
2276 unsigned char *buf, *addr;
2277 struct scatterlist *sg;
2278 unsigned int offset;
2282 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2284 * 1) read the specified logical block(s);
2285 * 2) transfer logical blocks from the data-out buffer;
2286 * 3) XOR the logical blocks transferred from the data-out buffer with
2287 * the logical blocks read, storing the resulting XOR data in a buffer;
2288 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2289 * blocks transferred from the data-out buffer; and
2290 * 5) transfer the resulting XOR data to the data-in buffer.
2292 buf = kmalloc(cmd->data_length, GFP_KERNEL);
2294 pr_err("Unable to allocate xor_callback buf\n");
2298 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
2299 * into the locally allocated *buf
2301 sg_copy_to_buffer(cmd->t_data_sg,
2307 * Now perform the XOR against the BIDI read memory located at
2308 * cmd->t_bidi_data_sg
2312 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2313 addr = kmap_atomic(sg_page(sg), KM_USER0);
2317 for (i = 0; i < sg->length; i++)
2318 *(addr + sg->offset + i) ^= *(buf + offset + i);
2320 offset += sg->length;
2321 kunmap_atomic(addr, KM_USER0);
2329 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2331 static int transport_get_sense_data(struct se_cmd *cmd)
2333 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2334 struct se_device *dev = cmd->se_dev;
2335 struct se_task *task = NULL, *task_tmp;
2336 unsigned long flags;
2339 WARN_ON(!cmd->se_lun);
2344 spin_lock_irqsave(&cmd->t_state_lock, flags);
2345 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2346 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2350 list_for_each_entry_safe(task, task_tmp,
2351 &cmd->t_task_list, t_list) {
2352 if (!task->task_sense)
2355 if (!dev->transport->get_sense_buffer) {
2356 pr_err("dev->transport->get_sense_buffer"
2361 sense_buffer = dev->transport->get_sense_buffer(task);
2362 if (!sense_buffer) {
2363 pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
2364 " sense buffer for task with sense\n",
2365 cmd->se_tfo->get_task_tag(cmd), task);
2368 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2370 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
2371 TRANSPORT_SENSE_BUFFER);
2373 memcpy(&buffer[offset], sense_buffer,
2374 TRANSPORT_SENSE_BUFFER);
2375 cmd->scsi_status = task->task_scsi_status;
2376 /* Automatically padded */
2377 cmd->scsi_sense_length =
2378 (TRANSPORT_SENSE_BUFFER + offset);
2380 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
2382 dev->se_hba->hba_id, dev->transport->name,
2386 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2391 static inline long long transport_dev_end_lba(struct se_device *dev)
2393 return dev->transport->get_blocks(dev) + 1;
2396 static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2398 struct se_device *dev = cmd->se_dev;
2401 if (dev->transport->get_device_type(dev) != TYPE_DISK)
2404 sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2406 if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2407 pr_err("LBA: %llu Sectors: %u exceeds"
2408 " transport_dev_end_lba(): %llu\n",
2409 cmd->t_task_lba, sectors,
2410 transport_dev_end_lba(dev));
2417 static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2420 * Determine if the received WRITE_SAME is used for direct
2421 * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
2422 * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
2423 * emulation for Linux/BLOCK discard with TCM/IBLOCK code.
2425 int passthrough = (dev->transport->transport_type ==
2426 TRANSPORT_PLUGIN_PHBA_PDEV);
2429 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2430 pr_err("WRITE_SAME PBDATA and LBDATA"
2431 " bits not supported for Block Discard"
2436 * Currently for the emulated case we only accept
2437 * WRITE_SAME requests (tpws) with the UNMAP=1 bit set.
2439 if (!(flags[0] & 0x08)) {
2440 pr_err("WRITE_SAME w/o UNMAP bit not"
2441 " supported for Block Discard Emulation\n");
2449 /* transport_generic_cmd_sequencer():
2451 * Generic Command Sequencer that should work for most DAS transport drivers.
2454 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
2457 * FIXME: Need to support other SCSI OPCODES here as well.
2459 static int transport_generic_cmd_sequencer(
2463 struct se_device *dev = cmd->se_dev;
2464 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2465 int ret = 0, sector_ret = 0, passthrough;
2466 u32 sectors = 0, size = 0, pr_reg_type = 0;
2470 * Check for an existing UNIT ATTENTION condition
2472 if (core_scsi3_ua_check(cmd, cdb) < 0) {
2473 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2474 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
2478 * Check status of Asymmetric Logical Unit Assignment port
2480 ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
2483 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
2484 * The ALUA additional sense code qualifier (ASCQ) is determined
2485 * by the ALUA primary or secondary access state..
2489 pr_debug("[%s]: ALUA TG Port not available,"
2490 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
2491 cmd->se_tfo->get_fabric_name(), alua_ascq);
2493 transport_set_sense_codes(cmd, 0x04, alua_ascq);
2494 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2495 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
2498 goto out_invalid_cdb_field;
2501 * Check status for SPC-3 Persistent Reservations
2503 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2504 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2505 cmd, cdb, pr_reg_type) != 0) {
2506 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2507 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2508 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2512 * This means the CDB is allowed for the SCSI Initiator port
2513 * when said port is *NOT* holding the legacy SPC-2 or
2514 * SPC-3 Persistent Reservation.
2519 * If we operate in passthrough mode we skip most CDB emulation and
2520 * instead hand the commands down to the physical SCSI device.
2523 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2527 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2529 goto out_unsupported_cdb;
2530 size = transport_get_size(sectors, cdb, cmd);
2531 cmd->t_task_lba = transport_lba_21(cdb);
2532 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2535 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2537 goto out_unsupported_cdb;
2538 size = transport_get_size(sectors, cdb, cmd);
2539 cmd->t_task_lba = transport_lba_32(cdb);
2540 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2543 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2545 goto out_unsupported_cdb;
2546 size = transport_get_size(sectors, cdb, cmd);
2547 cmd->t_task_lba = transport_lba_32(cdb);
2548 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2551 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2553 goto out_unsupported_cdb;
2554 size = transport_get_size(sectors, cdb, cmd);
2555 cmd->t_task_lba = transport_lba_64(cdb);
2556 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2559 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2561 goto out_unsupported_cdb;
2562 size = transport_get_size(sectors, cdb, cmd);
2563 cmd->t_task_lba = transport_lba_21(cdb);
2564 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2567 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2569 goto out_unsupported_cdb;
2570 size = transport_get_size(sectors, cdb, cmd);
2571 cmd->t_task_lba = transport_lba_32(cdb);
2573 cmd->se_cmd_flags |= SCF_FUA;
2574 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2577 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2579 goto out_unsupported_cdb;
2580 size = transport_get_size(sectors, cdb, cmd);
2581 cmd->t_task_lba = transport_lba_32(cdb);
2583 cmd->se_cmd_flags |= SCF_FUA;
2584 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2587 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2589 goto out_unsupported_cdb;
2590 size = transport_get_size(sectors, cdb, cmd);
2591 cmd->t_task_lba = transport_lba_64(cdb);
2593 cmd->se_cmd_flags |= SCF_FUA;
2594 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2596 case XDWRITEREAD_10:
2597 if ((cmd->data_direction != DMA_TO_DEVICE) ||
2598 !(cmd->se_cmd_flags & SCF_BIDI))
2599 goto out_invalid_cdb_field;
2600 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2602 goto out_unsupported_cdb;
2603 size = transport_get_size(sectors, cdb, cmd);
2604 cmd->t_task_lba = transport_lba_32(cdb);
2605 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2608 * Do not allow BIDI commands for passthrough mode.
2611 goto out_unsupported_cdb;
2614 * Setup BIDI XOR callback to be run after I/O completion.
2616 cmd->transport_complete_callback = &transport_xor_callback;
2618 cmd->se_cmd_flags |= SCF_FUA;
2620 case VARIABLE_LENGTH_CMD:
2621 service_action = get_unaligned_be16(&cdb[8]);
2622 switch (service_action) {
2623 case XDWRITEREAD_32:
2624 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2626 goto out_unsupported_cdb;
2627 size = transport_get_size(sectors, cdb, cmd);
2629 * Use WRITE_32 and READ_32 opcodes for the emulated
2630 * XDWRITE_READ_32 logic.
2632 cmd->t_task_lba = transport_lba_64_ext(cdb);
2633 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2636 * Do not allow BIDI commands for passthrough mode.
2639 goto out_unsupported_cdb;
2642 * Setup BIDI XOR callback to be run after I/O completion.
2645 cmd->transport_complete_callback = &transport_xor_callback;
2647 cmd->se_cmd_flags |= SCF_FUA;
2650 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2652 goto out_unsupported_cdb;
2655 size = transport_get_size(1, cdb, cmd);
2657 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2659 goto out_invalid_cdb_field;
2662 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
2663 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2665 if (target_check_write_same_discard(&cdb[10], dev) < 0)
2666 goto out_invalid_cdb_field;
2668 cmd->execute_task = target_emulate_write_same;
2671 pr_err("VARIABLE_LENGTH_CMD service action"
2672 " 0x%04x not supported\n", service_action);
2673 goto out_unsupported_cdb;
2676 case MAINTENANCE_IN:
2677 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2678 /* MAINTENANCE_IN from SCC-2 */
2680 * Check for emulated MI_REPORT_TARGET_PGS.
2682 if (cdb[1] == MI_REPORT_TARGET_PGS &&
2683 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2685 target_emulate_report_target_port_groups;
2687 size = (cdb[6] << 24) | (cdb[7] << 16) |
2688 (cdb[8] << 8) | cdb[9];
2690 /* GPCMD_SEND_KEY from multi media commands */
2691 size = (cdb[8] << 8) + cdb[9];
2693 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2697 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2699 case MODE_SELECT_10:
2700 size = (cdb[7] << 8) + cdb[8];
2701 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2705 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2707 cmd->execute_task = target_emulate_modesense;
2710 size = (cdb[7] << 8) + cdb[8];
2711 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2713 cmd->execute_task = target_emulate_modesense;
2715 case GPCMD_READ_BUFFER_CAPACITY:
2716 case GPCMD_SEND_OPC:
2719 size = (cdb[7] << 8) + cdb[8];
2720 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2722 case READ_BLOCK_LIMITS:
2723 size = READ_BLOCK_LEN;
2724 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2726 case GPCMD_GET_CONFIGURATION:
2727 case GPCMD_READ_FORMAT_CAPACITIES:
2728 case GPCMD_READ_DISC_INFO:
2729 case GPCMD_READ_TRACK_RZONE_INFO:
2730 size = (cdb[7] << 8) + cdb[8];
2731 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2733 case PERSISTENT_RESERVE_IN:
2734 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2735 cmd->execute_task = target_scsi3_emulate_pr_in;
2736 size = (cdb[7] << 8) + cdb[8];
2737 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2739 case PERSISTENT_RESERVE_OUT:
2740 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2741 cmd->execute_task = target_scsi3_emulate_pr_out;
2742 size = (cdb[7] << 8) + cdb[8];
2743 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2745 case GPCMD_MECHANISM_STATUS:
2746 case GPCMD_READ_DVD_STRUCTURE:
2747 size = (cdb[8] << 8) + cdb[9];
2748 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2751 size = READ_POSITION_LEN;
2752 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2754 case MAINTENANCE_OUT:
2755 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2756 /* MAINTENANCE_OUT from SCC-2
2758 * Check for emulated MO_SET_TARGET_PGS.
2760 if (cdb[1] == MO_SET_TARGET_PGS &&
2761 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2763 target_emulate_set_target_port_groups;
2766 size = (cdb[6] << 24) | (cdb[7] << 16) |
2767 (cdb[8] << 8) | cdb[9];
2769 /* GPCMD_REPORT_KEY from multi media commands */
2770 size = (cdb[8] << 8) + cdb[9];
2772 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2775 size = (cdb[3] << 8) + cdb[4];
2777 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
2778 * See spc4r17 section 5.3
2780 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2781 cmd->sam_task_attr = MSG_HEAD_TAG;
2782 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2784 cmd->execute_task = target_emulate_inquiry;
2787 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2788 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2791 size = READ_CAP_LEN;
2792 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2794 cmd->execute_task = target_emulate_readcapacity;
2796 case READ_MEDIA_SERIAL_NUMBER:
2797 case SECURITY_PROTOCOL_IN:
2798 case SECURITY_PROTOCOL_OUT:
2799 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2800 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2802 case SERVICE_ACTION_IN:
2803 switch (cmd->t_task_cdb[1] & 0x1f) {
2804 case SAI_READ_CAPACITY_16:
2807 target_emulate_readcapacity_16;
2813 pr_err("Unsupported SA: 0x%02x\n",
2814 cmd->t_task_cdb[1] & 0x1f);
2815 goto out_unsupported_cdb;
2818 case ACCESS_CONTROL_IN:
2819 case ACCESS_CONTROL_OUT:
2821 case READ_ATTRIBUTE:
2822 case RECEIVE_COPY_RESULTS:
2823 case WRITE_ATTRIBUTE:
2824 size = (cdb[10] << 24) | (cdb[11] << 16) |
2825 (cdb[12] << 8) | cdb[13];
2826 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2828 case RECEIVE_DIAGNOSTIC:
2829 case SEND_DIAGNOSTIC:
2830 size = (cdb[3] << 8) | cdb[4];
2831 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2833 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
2836 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2837 size = (2336 * sectors);
2838 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2843 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2847 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2849 cmd->execute_task = target_emulate_request_sense;
2851 case READ_ELEMENT_STATUS:
2852 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
2853 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2856 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2857 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2862 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
2863 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2865 if (cdb[0] == RESERVE_10)
2866 size = (cdb[7] << 8) | cdb[8];
2868 size = cmd->data_length;
2871 * Setup the legacy emulated handler for SPC-2 and
2872 * >= SPC-3 compatible reservation handling (CRH=1)
2873 * Otherwise, we assume the underlying SCSI logic is
2874 * running in SPC_PASSTHROUGH, and wants reservations
2875 * emulation disabled.
2877 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2878 cmd->execute_task = target_scsi2_reservation_reserve;
2879 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2884 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
2885 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2887 if (cdb[0] == RELEASE_10)
2888 size = (cdb[7] << 8) | cdb[8];
2890 size = cmd->data_length;
2892 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2893 cmd->execute_task = target_scsi2_reservation_release;
2894 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2896 case SYNCHRONIZE_CACHE:
2897 case 0x91: /* SYNCHRONIZE_CACHE_16: */
2899 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
2901 if (cdb[0] == SYNCHRONIZE_CACHE) {
2902 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2903 cmd->t_task_lba = transport_lba_32(cdb);
2905 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2906 cmd->t_task_lba = transport_lba_64(cdb);
2909 goto out_unsupported_cdb;
2911 size = transport_get_size(sectors, cdb, cmd);
2912 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2918 * Check to ensure that LBA + Range does not extend past the end of the
2919 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
2921 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
2922 if (transport_cmd_get_valid_sectors(cmd) < 0)
2923 goto out_invalid_cdb_field;
2925 cmd->execute_task = target_emulate_synchronize_cache;
2928 size = get_unaligned_be16(&cdb[7]);
2929 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2931 cmd->execute_task = target_emulate_unmap;
2934 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2936 goto out_unsupported_cdb;
2939 size = transport_get_size(1, cdb, cmd);
2941 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2942 goto out_invalid_cdb_field;
2945 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
2946 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2948 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2949 goto out_invalid_cdb_field;
2951 cmd->execute_task = target_emulate_write_same;
2954 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2956 goto out_unsupported_cdb;
2959 size = transport_get_size(1, cdb, cmd);
2961 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2962 goto out_invalid_cdb_field;
2965 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
2966 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2968 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
2969 * of byte 1 bit 3 UNMAP instead of original reserved field
2971 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2972 goto out_invalid_cdb_field;
2974 cmd->execute_task = target_emulate_write_same;
2976 case ALLOW_MEDIUM_REMOVAL:
2982 case TEST_UNIT_READY:
2984 case WRITE_FILEMARKS:
2985 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2987 cmd->execute_task = target_emulate_noop;
2989 case GPCMD_CLOSE_TRACK:
2990 case INITIALIZE_ELEMENT_STATUS:
2991 case GPCMD_LOAD_UNLOAD:
2992 case GPCMD_SET_SPEED:
2994 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2997 cmd->execute_task = target_report_luns;
2998 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3000 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3001 * See spc4r17 section 5.3
3003 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3004 cmd->sam_task_attr = MSG_HEAD_TAG;
3005 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3008 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
3009 " 0x%02x, sending CHECK_CONDITION.\n",
3010 cmd->se_tfo->get_fabric_name(), cdb[0]);
3011 goto out_unsupported_cdb;
3014 if (size != cmd->data_length) {
3015 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
3016 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
3017 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
3018 cmd->data_length, size, cdb[0]);
3020 cmd->cmd_spdtl = size;
3022 if (cmd->data_direction == DMA_TO_DEVICE) {
3023 pr_err("Rejecting underflow/overflow"
3025 goto out_invalid_cdb_field;
3028 * Reject READ_* or WRITE_* with overflow/underflow for
3029 * type SCF_SCSI_DATA_SG_IO_CDB.
3031 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
3032 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
3033 " CDB on non 512-byte sector setup subsystem"
3034 " plugin: %s\n", dev->transport->name);
3035 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3036 goto out_invalid_cdb_field;
3039 if (size > cmd->data_length) {
3040 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3041 cmd->residual_count = (size - cmd->data_length);
3043 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3044 cmd->residual_count = (cmd->data_length - size);
3046 cmd->data_length = size;
3049 /* reject any command that we don't have a handler for */
3050 if (!(passthrough || cmd->execute_task ||
3051 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3052 goto out_unsupported_cdb;
3054 /* Let's limit control cdbs to a page, for simplicity's sake. */
3055 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3057 goto out_invalid_cdb_field;
3059 transport_set_supported_SAM_opcode(cmd);
3062 out_unsupported_cdb:
3063 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3064 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3066 out_invalid_cdb_field:
3067 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3068 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3073 * Called from I/O completion to determine which dormant/delayed
3074 * and ordered cmds need to have their tasks added to the execution queue.
3076 static void transport_complete_task_attr(struct se_cmd *cmd)
3078 struct se_device *dev = cmd->se_dev;
3079 struct se_cmd *cmd_p, *cmd_tmp;
3080 int new_active_tasks = 0;
3082 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3083 atomic_dec(&dev->simple_cmds);
3084 smp_mb__after_atomic_dec();
3085 dev->dev_cur_ordered_id++;
3086 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
3087 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3088 cmd->se_ordered_id);
3089 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3090 dev->dev_cur_ordered_id++;
3091 pr_debug("Incremented dev_cur_ordered_id: %u for"
3092 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3093 cmd->se_ordered_id);
3094 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3095 atomic_dec(&dev->dev_ordered_sync);
3096 smp_mb__after_atomic_dec();
3098 dev->dev_cur_ordered_id++;
3099 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3100 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3103 * Process all commands up to the last received
3104 * ORDERED task attribute which requires another blocking boundary.
3107 spin_lock(&dev->delayed_cmd_lock);
3108 list_for_each_entry_safe(cmd_p, cmd_tmp,
3109 &dev->delayed_cmd_list, se_delayed_node) {
3111 list_del(&cmd_p->se_delayed_node);
3112 spin_unlock(&dev->delayed_cmd_lock);
3114 pr_debug("Calling add_tasks() for"
3115 " cmd_p: 0x%02x Task Attr: 0x%02x"
3116 " Dormant -> Active, se_ordered_id: %u\n",
3117 cmd_p->t_task_cdb[0],
3118 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3120 transport_add_tasks_from_cmd(cmd_p);
3123 spin_lock(&dev->delayed_cmd_lock);
3124 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3127 spin_unlock(&dev->delayed_cmd_lock);
3129 * If new tasks have become active, wake up the transport thread
3130 * to do the processing of the Active tasks.
3132 if (new_active_tasks != 0)
3133 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
3136 static void transport_complete_qf(struct se_cmd *cmd)
3140 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3141 transport_complete_task_attr(cmd);
3143 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3144 ret = cmd->se_tfo->queue_status(cmd);
3149 switch (cmd->data_direction) {
3150 case DMA_FROM_DEVICE:
3151 ret = cmd->se_tfo->queue_data_in(cmd);
3154 if (cmd->t_bidi_data_sg) {
3155 ret = cmd->se_tfo->queue_data_in(cmd);
3159 /* Fall through for DMA_TO_DEVICE */
3161 ret = cmd->se_tfo->queue_status(cmd);
3169 transport_handle_queue_full(cmd, cmd->se_dev);
3172 transport_lun_remove_cmd(cmd);
3173 transport_cmd_check_stop_to_fabric(cmd);
3176 static void transport_handle_queue_full(
3178 struct se_device *dev)
3180 spin_lock_irq(&dev->qf_cmd_lock);
3181 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3182 atomic_inc(&dev->dev_qf_count);
3183 smp_mb__after_atomic_inc();
3184 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3186 schedule_work(&cmd->se_dev->qf_work_queue);
3189 static void target_complete_ok_work(struct work_struct *work)
3191 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3192 int reason = 0, ret;
3195 * Check if we need to move delayed/dormant tasks from cmds on the
3196 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3199 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3200 transport_complete_task_attr(cmd);
3202 * Check to schedule QUEUE_FULL work, or execute an existing
3203 * cmd->transport_qf_callback()
3205 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3206 schedule_work(&cmd->se_dev->qf_work_queue);
3209 * Check if we need to retrieve a sense buffer from
3210 * the struct se_cmd in question.
3212 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3213 if (transport_get_sense_data(cmd) < 0)
3214 reason = TCM_NON_EXISTENT_LUN;
3217 * Only set when a struct se_task->task_scsi_status returned
3218 * a non-GOOD status.
3220 if (cmd->scsi_status) {
3221 ret = transport_send_check_condition_and_sense(
3223 if (ret == -EAGAIN || ret == -ENOMEM)
3226 transport_lun_remove_cmd(cmd);
3227 transport_cmd_check_stop_to_fabric(cmd);
3232 * Check for a callback, used by, amongst other things,
3233 * XDWRITE_READ_10 emulation.
3235 if (cmd->transport_complete_callback)
3236 cmd->transport_complete_callback(cmd);
3238 switch (cmd->data_direction) {
3239 case DMA_FROM_DEVICE:
3240 spin_lock(&cmd->se_lun->lun_sep_lock);
3241 if (cmd->se_lun->lun_sep) {
3242 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3245 spin_unlock(&cmd->se_lun->lun_sep_lock);
3247 ret = cmd->se_tfo->queue_data_in(cmd);
3248 if (ret == -EAGAIN || ret == -ENOMEM)
3252 spin_lock(&cmd->se_lun->lun_sep_lock);
3253 if (cmd->se_lun->lun_sep) {
3254 cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
3257 spin_unlock(&cmd->se_lun->lun_sep_lock);
3259 * Check if we need to send READ payload for BIDI-COMMAND
3261 if (cmd->t_bidi_data_sg) {
3262 spin_lock(&cmd->se_lun->lun_sep_lock);
3263 if (cmd->se_lun->lun_sep) {
3264 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3267 spin_unlock(&cmd->se_lun->lun_sep_lock);
3268 ret = cmd->se_tfo->queue_data_in(cmd);
3269 if (ret == -EAGAIN || ret == -ENOMEM)
3273 /* Fall through for DMA_TO_DEVICE */
3275 ret = cmd->se_tfo->queue_status(cmd);
3276 if (ret == -EAGAIN || ret == -ENOMEM)
3283 transport_lun_remove_cmd(cmd);
3284 transport_cmd_check_stop_to_fabric(cmd);
3288 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3289 " data_direction: %d\n", cmd, cmd->data_direction);
3290 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
3291 transport_handle_queue_full(cmd, cmd->se_dev);
3294 static void transport_free_dev_tasks(struct se_cmd *cmd)
3296 struct se_task *task, *task_tmp;
3297 unsigned long flags;
3298 LIST_HEAD(dispose_list);
3300 spin_lock_irqsave(&cmd->t_state_lock, flags);
3301 list_for_each_entry_safe(task, task_tmp,
3302 &cmd->t_task_list, t_list) {
3303 if (!(task->task_flags & TF_ACTIVE))
3304 list_move_tail(&task->t_list, &dispose_list);
3306 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3308 while (!list_empty(&dispose_list)) {
3309 task = list_first_entry(&dispose_list, struct se_task, t_list);
3311 if (task->task_sg != cmd->t_data_sg &&
3312 task->task_sg != cmd->t_bidi_data_sg)
3313 kfree(task->task_sg);
3315 list_del(&task->t_list);
3317 cmd->se_dev->transport->free_task(task);
3321 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3323 struct scatterlist *sg;
3326 for_each_sg(sgl, sg, nents, count)
3327 __free_page(sg_page(sg));
3332 static inline void transport_free_pages(struct se_cmd *cmd)
3334 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3337 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3338 cmd->t_data_sg = NULL;
3339 cmd->t_data_nents = 0;
3341 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3342 cmd->t_bidi_data_sg = NULL;
3343 cmd->t_bidi_data_nents = 0;
3347 * transport_release_cmd - free a command
3348 * @cmd: command to free
3350 * This routine unconditionally frees a command, and reference counting
3351 * or list removal must be done in the caller.
3353 static void transport_release_cmd(struct se_cmd *cmd)
3355 BUG_ON(!cmd->se_tfo);
3357 if (cmd->se_tmr_req)
3358 core_tmr_release_req(cmd->se_tmr_req);
3359 if (cmd->t_task_cdb != cmd->__t_task_cdb)
3360 kfree(cmd->t_task_cdb);
3362 * Check if target_wait_for_sess_cmds() is expecting to
3363 * release se_cmd directly here..
3365 if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
3366 if (cmd->se_tfo->check_release_cmd(cmd) != 0)
3369 cmd->se_tfo->release_cmd(cmd);
3373 * transport_put_cmd - release a reference to a command
3374 * @cmd: command to release
3376 * This routine releases our reference to the command and frees it if possible.
3378 static void transport_put_cmd(struct se_cmd *cmd)
3380 unsigned long flags;
3383 spin_lock_irqsave(&cmd->t_state_lock, flags);
3384 if (atomic_read(&cmd->t_fe_count)) {
3385 if (!atomic_dec_and_test(&cmd->t_fe_count))
3389 if (atomic_read(&cmd->t_se_count)) {
3390 if (!atomic_dec_and_test(&cmd->t_se_count))
3394 if (atomic_read(&cmd->transport_dev_active)) {
3395 atomic_set(&cmd->transport_dev_active, 0);
3396 transport_all_task_dev_remove_state(cmd);
3399 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3401 if (free_tasks != 0)
3402 transport_free_dev_tasks(cmd);
3404 transport_free_pages(cmd);
3405 transport_release_cmd(cmd);
3408 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3412 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
3413 * allocating in the core.
3414 * @cmd: Associated se_cmd descriptor
3415 * @mem: SGL style memory for TCM WRITE / READ
3416 * @sg_mem_num: Number of SGL elements
3417 * @mem_bidi_in: SGL style memory for TCM BIDI READ
3418 * @sg_mem_bidi_num: Number of BIDI READ SGL elements
3420 * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
3423 int transport_generic_map_mem_to_cmd(
3425 struct scatterlist *sgl,
3427 struct scatterlist *sgl_bidi,
3430 if (!sgl || !sgl_count)
3433 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3434 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3436 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
3437 * scatterlists already have been set to follow what the fabric
3438 * passes for the original expected data transfer length.
3440 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
3441 pr_warn("Rejecting SCSI DATA overflow for fabric using"
3442 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
3443 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3444 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3448 cmd->t_data_sg = sgl;
3449 cmd->t_data_nents = sgl_count;
3451 if (sgl_bidi && sgl_bidi_count) {
3452 cmd->t_bidi_data_sg = sgl_bidi;
3453 cmd->t_bidi_data_nents = sgl_bidi_count;
3455 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
3460 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
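/*
 * Illustrative sketch (editorial addition, not part of this driver): a fabric
 * module that already owns SGL memory can hand it to the core via
 * transport_generic_map_mem_to_cmd() instead of letting the core allocate
 * pages in transport_generic_get_mem(). The function name
 * example_fabric_submit_data() is hypothetical; one plausible flow is:
 */
static int example_fabric_submit_data(struct se_cmd *se_cmd,
		struct scatterlist *sgl, u32 sgl_count)
{
	int ret;

	/* Core adopts the SGL and sets SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC */
	ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, NULL, 0);
	if (ret < 0)
		return ret;
	/* Allocate tasks and queue for execution (or trigger ->write_pending) */
	return transport_generic_new_cmd(se_cmd);
}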
3462 void *transport_kmap_first_data_page(struct se_cmd *cmd)
3464 struct scatterlist *sg = cmd->t_data_sg;
3468 * We need to take into account a possible offset here for fabrics like
3469 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
3470 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
3472 return kmap(sg_page(sg)) + sg->offset;
3474 EXPORT_SYMBOL(transport_kmap_first_data_page);
3476 void transport_kunmap_first_data_page(struct se_cmd *cmd)
3478 kunmap(sg_page(cmd->t_data_sg));
3480 EXPORT_SYMBOL(transport_kunmap_first_data_page);
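/*
 * Illustrative sketch (editorial addition): callers needing a linear view of
 * the start of the payload bracket their access with the two helpers above.
 * example_peek_first_byte() is a hypothetical name; note that only the first
 * data page is mapped.
 */
static unsigned char example_peek_first_byte(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_first_data_page(cmd);
	unsigned char first = buf[0];

	transport_kunmap_first_data_page(cmd);
	return first;
}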
3483 transport_generic_get_mem(struct se_cmd *cmd)
3485 u32 length = cmd->data_length;
3490 nents = DIV_ROUND_UP(length, PAGE_SIZE);
3491 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3492 if (!cmd->t_data_sg)
3495 cmd->t_data_nents = nents;
3496 sg_init_table(cmd->t_data_sg, nents);
3499 u32 page_len = min_t(u32, length, PAGE_SIZE);
3500 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3504 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3512 __free_page(sg_page(&cmd->t_data_sg[i]));
3515 kfree(cmd->t_data_sg);
3516 cmd->t_data_sg = NULL;
3520 /* Reduce sectors if they are too long for the device */
3521 static inline sector_t transport_limit_task_sectors(
3522 struct se_device *dev,
3523 unsigned long long lba,
3526 sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
3528 if (dev->transport->get_device_type(dev) == TYPE_DISK)
3529 if ((lba + sectors) > transport_dev_end_lba(dev))
3530 sectors = ((transport_dev_end_lba(dev) - lba) + 1);
3537 * This function can be used by HW target mode drivers to create a linked
3538 * scatterlist from all contiguously allocated struct se_task->task_sg[].
3539 * This is intended to be called during the completion path by TCM Core
3540 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
3542 void transport_do_task_sg_chain(struct se_cmd *cmd)
3544 struct scatterlist *sg_first = NULL;
3545 struct scatterlist *sg_prev = NULL;
3546 int sg_prev_nents = 0;
3547 struct scatterlist *sg;
3548 struct se_task *task;
3549 u32 chained_nents = 0;
3552 BUG_ON(!cmd->se_tfo->task_sg_chaining);
3555 * Walk the struct se_task list and setup scatterlist chains
3556 * for each contiguously allocated struct se_task->task_sg[].
3558 list_for_each_entry(task, &cmd->t_task_list, t_list) {
3563 sg_first = task->task_sg;
3564 chained_nents = task->task_sg_nents;
3566 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
3567 chained_nents += task->task_sg_nents;
3570 * For the padded tasks, use the extra SGL vector allocated
3571 * in transport_allocate_data_tasks() for the sg_prev_nents
3572 * offset into sg_chain() above.
3574 * We do not need the padding for the last task (or a single
3575 * task), but in that case we will never use the sg_prev_nents
3576 * value below which would be incorrect.
3578 sg_prev_nents = (task->task_sg_nents + 1);
3579 sg_prev = task->task_sg;
3582 * Setup the starting pointer and total t_tasks_sg_linked_no including
3583 * padding SGs for linking and to mark the end.
3585 cmd->t_tasks_sg_chained = sg_first;
3586 cmd->t_tasks_sg_chained_no = chained_nents;
3588 pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
3589 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
3590 cmd->t_tasks_sg_chained_no);
3592 for_each_sg(cmd->t_tasks_sg_chained, sg,
3593 cmd->t_tasks_sg_chained_no, i) {
3595 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
3596 i, sg, sg_page(sg), sg->length, sg->offset);
3597 if (sg_is_chain(sg))
3598 pr_debug("SG: %p sg_is_chain=1\n", sg);
3600 pr_debug("SG: %p sg_is_last=1\n", sg);
3603 EXPORT_SYMBOL(transport_do_task_sg_chain);
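/*
 * Illustrative sketch (editorial addition): once transport_do_task_sg_chain()
 * has linked the per-task SGLs, a HW fabric module can walk the single
 * chained list starting at cmd->t_tasks_sg_chained, much like the debug loop
 * above. example_fabric_program_sgl() is a hypothetical name.
 */
static void example_fabric_program_sgl(struct se_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(cmd->t_tasks_sg_chained, sg,
			cmd->t_tasks_sg_chained_no, i) {
		if (!sg)
			break;
		/* e.g. hand sg_phys(sg) and sg->length to the HW SGL here */
	}
}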
3606 * Break up cmd into chunks transport can handle
3609 transport_allocate_data_tasks(struct se_cmd *cmd,
3610 enum dma_data_direction data_direction,
3611 struct scatterlist *cmd_sg, unsigned int sgl_nents)
3613 struct se_device *dev = cmd->se_dev;
3615 unsigned long long lba;
3616 sector_t sectors, dev_max_sectors;
3619 if (transport_cmd_get_valid_sectors(cmd) < 0)
3622 dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
3623 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
3625 WARN_ON(cmd->data_length % sector_size);
3627 lba = cmd->t_task_lba;
3628 sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
3629 task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
3632 * If we need just a single task reuse the SG list in the command
3633 * and avoid a lot of work.
3635 if (task_count == 1) {
3636 struct se_task *task;
3637 unsigned long flags;
3639 task = transport_generic_get_task(cmd, data_direction);
3643 task->task_sg = cmd_sg;
3644 task->task_sg_nents = sgl_nents;
3646 task->task_lba = lba;
3647 task->task_sectors = sectors;
3648 task->task_size = task->task_sectors * sector_size;
3650 spin_lock_irqsave(&cmd->t_state_lock, flags);
3651 list_add_tail(&task->t_list, &cmd->t_task_list);
3652 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3657 for (i = 0; i < task_count; i++) {
3658 struct se_task *task;
3659 unsigned int task_size, task_sg_nents_padded;
3660 struct scatterlist *sg;
3661 unsigned long flags;
3664 task = transport_generic_get_task(cmd, data_direction);
3668 task->task_lba = lba;
3669 task->task_sectors = min(sectors, dev_max_sectors);
3670 task->task_size = task->task_sectors * sector_size;
3673 * This now assumes that the passed sg_ents are in PAGE_SIZE chunks
3674 * in order to calculate the number of per-task SGL entries
3676 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
3678 * Check if the fabric module driver is requesting that all
3679 * struct se_task->task_sg[] be chained together.. If so,
3680 * then allocate an extra padding SG entry for linking and
3681 * marking the end of the chained SGL for every task except
3682 * the last one for (task_count > 1) operation, or skipping
3683 * the extra padding for the (task_count == 1) case.
3685 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
3686 task_sg_nents_padded = (task->task_sg_nents + 1);
3688 task_sg_nents_padded = task->task_sg_nents;
3690 task->task_sg = kmalloc(sizeof(struct scatterlist) *
3691 task_sg_nents_padded, GFP_KERNEL);
3692 if (!task->task_sg) {
3693 cmd->se_dev->transport->free_task(task);
3697 sg_init_table(task->task_sg, task_sg_nents_padded);
3699 task_size = task->task_size;
3701 /* Build new sgl, only up to task_size */
3702 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
3703 if (cmd_sg->length > task_size)
3707 task_size -= cmd_sg->length;
3708 cmd_sg = sg_next(cmd_sg);
3711 lba += task->task_sectors;
3712 sectors -= task->task_sectors;
3714 spin_lock_irqsave(&cmd->t_state_lock, flags);
3715 list_add_tail(&task->t_list, &cmd->t_task_list);
3716 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3723 transport_allocate_control_task(struct se_cmd *cmd)
3725 struct se_task *task;
3726 unsigned long flags;
3728 task = transport_generic_get_task(cmd, cmd->data_direction);
3732 task->task_sg = cmd->t_data_sg;
3733 task->task_size = cmd->data_length;
3734 task->task_sg_nents = cmd->t_data_nents;
3736 spin_lock_irqsave(&cmd->t_state_lock, flags);
3737 list_add_tail(&task->t_list, &cmd->t_task_list);
3738 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3740 /* Success! Return number of tasks allocated */
3745 * Allocate any required resources to execute the command, and place
3746 * it on the execution queue if possible. For writes we might not have the
3747 * payload yet, so instead notify the fabric via a call to ->write_pending.
3749 int transport_generic_new_cmd(struct se_cmd *cmd)
3751 struct se_device *dev = cmd->se_dev;
3752 int task_cdbs, task_cdbs_bidi = 0;
3757 * Determine if the TCM fabric module has already allocated physical
3758 * memory, and is directly calling transport_generic_map_mem_to_cmd()
3761 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
3763 ret = transport_generic_get_mem(cmd);
3769 * For BIDI command set up the read tasks first.
3771 if (cmd->t_bidi_data_sg &&
3772 dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
3773 BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
3775 task_cdbs_bidi = transport_allocate_data_tasks(cmd,
3776 DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
3777 cmd->t_bidi_data_nents);
3778 if (task_cdbs_bidi <= 0)
3781 atomic_inc(&cmd->t_fe_count);
3782 atomic_inc(&cmd->t_se_count);
3786 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3787 task_cdbs = transport_allocate_data_tasks(cmd,
3788 cmd->data_direction, cmd->t_data_sg,
3791 task_cdbs = transport_allocate_control_task(cmd);
3796 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
3797 cmd->t_state = TRANSPORT_COMPLETE;
3798 atomic_set(&cmd->t_transport_active, 1);
3799 INIT_WORK(&cmd->work, target_complete_ok_work);
3800 queue_work(target_completion_wq, &cmd->work);
3805 atomic_inc(&cmd->t_fe_count);
3806 atomic_inc(&cmd->t_se_count);
3809 cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
3810 atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
3811 atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
3814 * For WRITEs, let the fabric know its buffer is ready.
3815 * This WRITE struct se_cmd (and all of its associated struct se_task's)
3816 * will be added to the struct se_device execution queue after its WRITE
3817 * data has arrived. (i.e. it gets handled by the transport processing
3818 * thread a second time)
3820 if (cmd->data_direction == DMA_TO_DEVICE) {
3821 transport_add_tasks_to_state_queue(cmd);
3822 return transport_generic_write_pending(cmd);
3825 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
3826 * to the execution queue.
3828 transport_execute_tasks(cmd);
3832 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3833 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3836 EXPORT_SYMBOL(transport_generic_new_cmd);
3838 /* transport_generic_process_write():
3842 void transport_generic_process_write(struct se_cmd *cmd)
3844 transport_execute_tasks(cmd);
3846 EXPORT_SYMBOL(transport_generic_process_write);
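/*
 * Illustrative sketch (editorial addition): the WRITE half of the flow above.
 * After transport_generic_new_cmd() has returned via ->write_pending(), the
 * fabric receive path collects the data-out payload into cmd->t_data_sg and
 * then hands the command back to the core for execution.
 * example_fabric_data_out_complete() is a hypothetical name.
 */
static void example_fabric_data_out_complete(struct se_cmd *se_cmd)
{
	transport_generic_process_write(se_cmd);
}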
3848 static void transport_write_pending_qf(struct se_cmd *cmd)
3852 ret = cmd->se_tfo->write_pending(cmd);
3853 if (ret == -EAGAIN || ret == -ENOMEM) {
3854 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
3856 transport_handle_queue_full(cmd, cmd->se_dev);
3860 static int transport_generic_write_pending(struct se_cmd *cmd)
3862 unsigned long flags;
3865 spin_lock_irqsave(&cmd->t_state_lock, flags);
3866 cmd->t_state = TRANSPORT_WRITE_PENDING;
3867 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3870 * Clear the se_cmd for WRITE_PENDING status in order to set
3871 * cmd->t_transport_active=0 so that transport_generic_handle_data
3872 * can be called from HW target mode interrupt code. This is safe
3873 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
3874 * because the se_cmd->se_lun pointer is not being cleared.
3876 transport_cmd_check_stop(cmd, 1, 0);
3879 * Call the fabric write_pending function here to let the
3880 * frontend know that WRITE buffers are ready.
3882 ret = cmd->se_tfo->write_pending(cmd);
3883 if (ret == -EAGAIN || ret == -ENOMEM)
3891 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
3892 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
3893 transport_handle_queue_full(cmd, cmd->se_dev);
3897 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3899 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
3900 if (wait_for_tasks && cmd->se_tmr_req)
3901 transport_wait_for_tasks(cmd);
3903 transport_release_cmd(cmd);
3906 transport_wait_for_tasks(cmd);
3908 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
3911 transport_lun_remove_cmd(cmd);
3913 transport_free_dev_tasks(cmd);
3915 transport_put_cmd(cmd);
3918 EXPORT_SYMBOL(transport_generic_free_cmd);
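/*
 * Illustrative sketch (editorial addition): a fabric response-completion path
 * typically drops its descriptor through transport_generic_free_cmd(); the
 * wait_for_tasks argument is only set when the caller must first block until
 * the backend has quiesced the command. example_fabric_response_done() is a
 * hypothetical name.
 */
static void example_fabric_response_done(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
}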
3920 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
3921 * @se_sess: session to reference
3922 * @se_cmd: command descriptor to add
3924 void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3926 unsigned long flags;
3928 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3929 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
3930 se_cmd->check_release = 1;
3931 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3933 EXPORT_SYMBOL(target_get_sess_cmd);
3935 /* target_put_sess_cmd - Check for active I/O shutdown or list delete
3936 * @se_sess: session to reference
3937 * @se_cmd: command descriptor to drop
3939 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3941 unsigned long flags;
3943 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3944 if (list_empty(&se_cmd->se_cmd_list)) {
3945 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3950 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
3951 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3952 complete(&se_cmd->cmd_wait_comp);
3955 list_del(&se_cmd->se_cmd_list);
3956 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3960 EXPORT_SYMBOL(target_put_sess_cmd);
3962 /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
3963 * @se_sess: session to split
3965 void target_splice_sess_cmd_list(struct se_session *se_sess)
3967 struct se_cmd *se_cmd;
3968 unsigned long flags;
3970 WARN_ON(!list_empty(&se_sess->sess_wait_list));
3971 INIT_LIST_HEAD(&se_sess->sess_wait_list);
3973 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3974 se_sess->sess_tearing_down = 1;
3976 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
3978 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
3979 se_cmd->cmd_wait_set = 1;
3981 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3983 EXPORT_SYMBOL(target_splice_sess_cmd_list);
3985 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
3986 * @se_sess: session to wait for active I/O
3987 * @wait_for_tasks: Make extra transport_wait_for_tasks call
3989 void target_wait_for_sess_cmds(
3990 struct se_session *se_sess,
3993 struct se_cmd *se_cmd, *tmp_cmd;
3996 list_for_each_entry_safe(se_cmd, tmp_cmd,
3997 &se_sess->sess_wait_list, se_cmd_list) {
3998 list_del(&se_cmd->se_cmd_list);
4000 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
4001 " %d\n", se_cmd, se_cmd->t_state,
4002 se_cmd->se_tfo->get_cmd_state(se_cmd));
4004 if (wait_for_tasks) {
4005 pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
4006 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4007 se_cmd->se_tfo->get_cmd_state(se_cmd));
4009 rc = transport_wait_for_tasks(se_cmd);
4011 pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
4012 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4013 se_cmd->se_tfo->get_cmd_state(se_cmd));
4017 wait_for_completion(&se_cmd->cmd_wait_comp);
4018 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
4019 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4020 se_cmd->se_tfo->get_cmd_state(se_cmd));
4023 se_cmd->se_tfo->release_cmd(se_cmd);
4026 EXPORT_SYMBOL(target_wait_for_sess_cmds);
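/*
 * Illustrative sketch (editorial addition): typical fabric session shutdown
 * ordering using the two helpers above: splice the still-active descriptors
 * onto sess_wait_list first, then block until each one has been completed and
 * released. example_fabric_close_session() is a hypothetical name.
 */
static void example_fabric_close_session(struct se_session *se_sess)
{
	target_splice_sess_cmd_list(se_sess);
	target_wait_for_sess_cmds(se_sess, 0);
	/* The fabric may now free its session-private state */
}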
4028 /* transport_lun_wait_for_tasks():
4030 * Called from ConfigFS context to stop the passed struct se_cmd to allow
4031 * a struct se_lun to be successfully shut down.
4033 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4035 unsigned long flags;
4038 * If the frontend has already requested this struct se_cmd to
4039 * be stopped, we can safely ignore this struct se_cmd.
4041 spin_lock_irqsave(&cmd->t_state_lock, flags);
4042 if (atomic_read(&cmd->t_transport_stop)) {
4043 atomic_set(&cmd->transport_lun_stop, 0);
4044 pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
4045 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
4046 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4047 transport_cmd_check_stop(cmd, 1, 0);
4050 atomic_set(&cmd->transport_lun_fe_stop, 1);
4051 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4053 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4055 ret = transport_stop_tasks_for_cmd(cmd);
4057 pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4058 " %d\n", cmd, cmd->t_task_list_num, ret);
4060 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4061 cmd->se_tfo->get_task_tag(cmd));
4062 wait_for_completion(&cmd->transport_lun_stop_comp);
4063 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
4064 cmd->se_tfo->get_task_tag(cmd));
4066 transport_remove_cmd_from_queue(cmd);
4071 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4073 struct se_cmd *cmd = NULL;
4074 unsigned long lun_flags, cmd_flags;
4076 * Do exception processing and return CHECK_CONDITION status to the Initiator Port.
4079 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4080 while (!list_empty(&lun->lun_cmd_list)) {
4081 cmd = list_first_entry(&lun->lun_cmd_list,
4082 struct se_cmd, se_lun_node);
4083 list_del(&cmd->se_lun_node);
4085 atomic_set(&cmd->transport_lun_active, 0);
4087 * This will notify iscsi_target_transport.c:
4088 * transport_cmd_check_stop() that a LUN shutdown is in
4089 * progress for the iscsi_cmd_t.
4091 spin_lock(&cmd->t_state_lock);
4092 pr_debug("SE_LUN[%d] - Setting cmd->transport"
4093 "_lun_stop for ITT: 0x%08x\n",
4094 cmd->se_lun->unpacked_lun,
4095 cmd->se_tfo->get_task_tag(cmd));
4096 atomic_set(&cmd->transport_lun_stop, 1);
4097 spin_unlock(&cmd->t_state_lock);
4099 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4102 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
4103 cmd->se_tfo->get_task_tag(cmd),
4104 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4108 * If the Storage engine still owns the iscsi_cmd_t, determine
4109 * and/or stop its context.
4111 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
4112 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4113 cmd->se_tfo->get_task_tag(cmd));
4115 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
4116 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4120 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
4121 "_wait_for_tasks(): SUCCESS\n",
4122 cmd->se_lun->unpacked_lun,
4123 cmd->se_tfo->get_task_tag(cmd));
4125 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4126 if (!atomic_read(&cmd->transport_dev_active)) {
4127 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4130 atomic_set(&cmd->transport_dev_active, 0);
4131 transport_all_task_dev_remove_state(cmd);
4132 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4134 transport_free_dev_tasks(cmd);
4136 * The Storage engine stopped this struct se_cmd before it was
4137 * sent to the fabric frontend for delivery back to the
4138 * Initiator Node. Return this SCSI CDB back with a
4139 * CHECK_CONDITION status.
4142 transport_send_check_condition_and_sense(cmd,
4143 TCM_NON_EXISTENT_LUN, 0);
4145 * If the fabric frontend is waiting for this iscsi_cmd_t to
4146 * be released, notify the waiting thread now that LU has
4147 * finished accessing it.
4149 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4150 if (atomic_read(&cmd->transport_lun_fe_stop)) {
4151 pr_debug("SE_LUN[%d] - Detected FE stop for"
4152 " struct se_cmd: %p ITT: 0x%08x\n",
4154 cmd, cmd->se_tfo->get_task_tag(cmd));
4156 spin_unlock_irqrestore(&cmd->t_state_lock,
4158 transport_cmd_check_stop(cmd, 1, 0);
4159 complete(&cmd->transport_lun_fe_stop_comp);
4160 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4163 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
4164 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
4166 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4167 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4169 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4172 static int transport_clear_lun_thread(void *p)
4174 struct se_lun *lun = (struct se_lun *)p;
4176 __transport_clear_lun_from_sessions(lun);
4177 complete(&lun->lun_shutdown_comp);
4182 int transport_clear_lun_from_sessions(struct se_lun *lun)
4184 struct task_struct *kt;
4186 kt = kthread_run(transport_clear_lun_thread, lun,
4187 "tcm_cl_%u", lun->unpacked_lun);
4189 pr_err("Unable to start clear_lun thread\n");
4192 wait_for_completion(&lun->lun_shutdown_comp);
4198 * transport_wait_for_tasks - wait for completion to occur
4199 * @cmd: command to wait on
4201 * Called from frontend fabric context to wait for storage engine
4202 * to pause and/or release frontend generated struct se_cmd.
4204 bool transport_wait_for_tasks(struct se_cmd *cmd)
4206 unsigned long flags;
4208 spin_lock_irqsave(&cmd->t_state_lock, flags);
4209 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
4210 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4214 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
4215 * has been set in transport_set_supported_SAM_opcode().
4217 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
4218 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4222 * If we are already stopped due to an external event (ie: LUN shutdown)
4223 * sleep until the connection can have the passed struct se_cmd back.
4224 * The cmd->transport_lun_fe_stop_comp will be completed by
4225 * transport_clear_lun_from_sessions() once the ConfigFS context caller
4226 * has completed its operation on the struct se_cmd.
4228 if (atomic_read(&cmd->transport_lun_stop)) {
4230 pr_debug("wait_for_tasks: Stopping"
4231 " wait_for_completion(&cmd->t_tasktransport_lun_fe"
4232 "_stop_comp); for ITT: 0x%08x\n",
4233 cmd->se_tfo->get_task_tag(cmd));
4235 * There is a special case for WRITES where a FE exception +
4236 * LUN shutdown means ConfigFS context is still sleeping on
4237 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
4238 * We go ahead and up transport_lun_stop_comp just to be sure
4241 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4242 complete(&cmd->transport_lun_stop_comp);
4243 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4244 spin_lock_irqsave(&cmd->t_state_lock, flags);
4246 transport_all_task_dev_remove_state(cmd);
4248 * At this point, the frontend who was the originator of this
4249 * struct se_cmd, now owns the structure and can be released through
4250 * normal means below.
4252 pr_debug("wait_for_tasks: Stopped"
4253 " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
4254 "stop_comp); for ITT: 0x%08x\n",
4255 cmd->se_tfo->get_task_tag(cmd));
4257 atomic_set(&cmd->transport_lun_stop, 0);
4259 if (!atomic_read(&cmd->t_transport_active) ||
4260 atomic_read(&cmd->t_transport_aborted)) {
4261 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4265 atomic_set(&cmd->t_transport_stop, 1);
4267 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
4268 " i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
4269 cmd, cmd->se_tfo->get_task_tag(cmd),
4270 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4272 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4274 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4276 wait_for_completion(&cmd->t_transport_stop_comp);
4278 spin_lock_irqsave(&cmd->t_state_lock, flags);
4279 atomic_set(&cmd->t_transport_active, 0);
4280 atomic_set(&cmd->t_transport_stop, 0);
4282 pr_debug("wait_for_tasks: Stopped wait_for_compltion("
4283 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
4284 cmd->se_tfo->get_task_tag(cmd));
4286 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4290 EXPORT_SYMBOL(transport_wait_for_tasks);
4292 static int transport_get_sense_codes(
4297 *asc = cmd->scsi_asc;
4298 *ascq = cmd->scsi_ascq;
4303 static int transport_set_sense_codes(
4308 cmd->scsi_asc = asc;
4309 cmd->scsi_ascq = ascq;
int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2; SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* NOT READY */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
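
/*
 * Send the delayed SAM_STAT_TASK_ABORTED status for an aborted command at
 * most once (guarded by SCF_SENT_DELAYED_TAS).  Returns non-zero when the
 * command has been aborted, zero otherwise.
 */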
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (atomic_read(&cmd->t_transport_aborted) != 0) {
		if (!send_status ||
		    (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;

		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));

		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);
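
/*
 * Queue SAM_STAT_TASK_ABORTED status for @cmd back to the fabric module,
 * unless a CHECK CONDITION response has already been sent for it.
 */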
void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			atomic_inc(&cmd->t_transport_aborted);
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));

	cmd->se_tfo->queue_status(cmd);
}
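
/*
 * Execute a queued task management request and queue the TMR response back
 * to the fabric module.  Only TMR_LUN_RESET is actually carried out here via
 * core_tmr_lun_reset(); the remaining functions are completed as rejected or
 * not supported.
 */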
static int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
	return 0;
}
/*	transport_processing_thread():
 *
 *	Main device processing kthread: waits on dev_queue_obj.thread_wq,
 *	runs __transport_execute_tasks(), and dispatches queued se_cmd
 *	descriptors by t_state until kthread_should_stop().
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = (struct se_device *) param;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

get_cmd:
		__transport_execute_tasks(dev);

		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_NEW_CMD_MAP:
			if (!cmd->se_tfo->new_cmd_map) {
				pr_err("cmd->se_tfo->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = cmd->se_tfo->new_cmd_map(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_WP:
			transport_write_pending_qf(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_OK:
			transport_complete_qf(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d for ITT: 0x%08x "
				"i_state: %d on SE LUN: %u\n",
				cmd->t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	WARN_ON(!list_empty(&dev->state_task_list));
	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
	dev->process_thread = NULL;