target: remove the se_ordered_node se_cmd field

drivers/target/target_core_transport.c
1 /*******************************************************************************
2  * Filename:  target_core_transport.c
3  *
4  * This file contains the Generic Target Engine Core.
5  *
6  * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7  * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8  * Copyright (c) 2007-2010 Rising Tide Systems
9  * Copyright (c) 2008-2010 Linux-iSCSI.org
10  *
11  * Nicholas A. Bellinger <nab@kernel.org>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26  *
27  ******************************************************************************/
28
29 #include <linux/net.h>
30 #include <linux/delay.h>
31 #include <linux/string.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/blkdev.h>
35 #include <linux/spinlock.h>
36 #include <linux/kthread.h>
37 #include <linux/in.h>
38 #include <linux/cdrom.h>
39 #include <linux/module.h>
40 #include <asm/unaligned.h>
41 #include <net/sock.h>
42 #include <net/tcp.h>
43 #include <scsi/scsi.h>
44 #include <scsi/scsi_cmnd.h>
45 #include <scsi/scsi_tcq.h>
46
47 #include <target/target_core_base.h>
48 #include <target/target_core_device.h>
49 #include <target/target_core_tmr.h>
50 #include <target/target_core_tpg.h>
51 #include <target/target_core_transport.h>
52 #include <target/target_core_fabric_ops.h>
53 #include <target/target_core_configfs.h>
54
55 #include "target_core_alua.h"
56 #include "target_core_cdb.h"
57 #include "target_core_hba.h"
58 #include "target_core_pr.h"
59 #include "target_core_ua.h"
60
61 static int sub_api_initialized;
62
63 static struct workqueue_struct *target_completion_wq;
64 static struct kmem_cache *se_sess_cache;
65 struct kmem_cache *se_tmr_req_cache;
66 struct kmem_cache *se_ua_cache;
67 struct kmem_cache *t10_pr_reg_cache;
68 struct kmem_cache *t10_alua_lu_gp_cache;
69 struct kmem_cache *t10_alua_lu_gp_mem_cache;
70 struct kmem_cache *t10_alua_tg_pt_gp_cache;
71 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
72
73 static int transport_generic_write_pending(struct se_cmd *);
74 static int transport_processing_thread(void *param);
75 static int __transport_execute_tasks(struct se_device *dev);
76 static void transport_complete_task_attr(struct se_cmd *cmd);
77 static void transport_handle_queue_full(struct se_cmd *cmd,
78                 struct se_device *dev);
79 static void transport_free_dev_tasks(struct se_cmd *cmd);
80 static int transport_generic_get_mem(struct se_cmd *cmd);
81 static void transport_put_cmd(struct se_cmd *cmd);
82 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
83 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
84 static void transport_generic_request_failure(struct se_cmd *);
85 static void target_complete_ok_work(struct work_struct *work);
86
87 int init_se_kmem_caches(void)
88 {
89         se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
90                         sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
91                         0, NULL);
92         if (!se_tmr_req_cache) {
93                 pr_err("kmem_cache_create() for struct se_tmr_req"
94                                 " failed\n");
95                 goto out;
96         }
97         se_sess_cache = kmem_cache_create("se_sess_cache",
98                         sizeof(struct se_session), __alignof__(struct se_session),
99                         0, NULL);
100         if (!se_sess_cache) {
101                 pr_err("kmem_cache_create() for struct se_session"
102                                 " failed\n");
103                 goto out_free_tmr_req_cache;
104         }
105         se_ua_cache = kmem_cache_create("se_ua_cache",
106                         sizeof(struct se_ua), __alignof__(struct se_ua),
107                         0, NULL);
108         if (!se_ua_cache) {
109                 pr_err("kmem_cache_create() for struct se_ua failed\n");
110                 goto out_free_sess_cache;
111         }
112         t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
113                         sizeof(struct t10_pr_registration),
114                         __alignof__(struct t10_pr_registration), 0, NULL);
115         if (!t10_pr_reg_cache) {
116                 pr_err("kmem_cache_create() for struct t10_pr_registration"
117                                 " failed\n");
118                 goto out_free_ua_cache;
119         }
120         t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
121                         sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
122                         0, NULL);
123         if (!t10_alua_lu_gp_cache) {
124                 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
125                                 " failed\n");
126                 goto out_free_pr_reg_cache;
127         }
128         t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
129                         sizeof(struct t10_alua_lu_gp_member),
130                         __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
131         if (!t10_alua_lu_gp_mem_cache) {
132                 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
133                                 "cache failed\n");
134                 goto out_free_lu_gp_cache;
135         }
136         t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
137                         sizeof(struct t10_alua_tg_pt_gp),
138                         __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
139         if (!t10_alua_tg_pt_gp_cache) {
140                 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
141                                 "cache failed\n");
142                 goto out_free_lu_gp_mem_cache;
143         }
144         t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
145                         "t10_alua_tg_pt_gp_mem_cache",
146                         sizeof(struct t10_alua_tg_pt_gp_member),
147                         __alignof__(struct t10_alua_tg_pt_gp_member),
148                         0, NULL);
149         if (!t10_alua_tg_pt_gp_mem_cache) {
150                 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
151                                 "mem_cache failed\n");
152                 goto out_free_tg_pt_gp_cache;
153         }
154
155         target_completion_wq = alloc_workqueue("target_completion",
156                                                WQ_MEM_RECLAIM, 0);
157         if (!target_completion_wq)
158                 goto out_free_tg_pt_gp_mem_cache;
159
160         return 0;
161
162 out_free_tg_pt_gp_mem_cache:
163         kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
164 out_free_tg_pt_gp_cache:
165         kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
166 out_free_lu_gp_mem_cache:
167         kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
168 out_free_lu_gp_cache:
169         kmem_cache_destroy(t10_alua_lu_gp_cache);
170 out_free_pr_reg_cache:
171         kmem_cache_destroy(t10_pr_reg_cache);
172 out_free_ua_cache:
173         kmem_cache_destroy(se_ua_cache);
174 out_free_sess_cache:
175         kmem_cache_destroy(se_sess_cache);
176 out_free_tmr_req_cache:
177         kmem_cache_destroy(se_tmr_req_cache);
178 out:
179         return -ENOMEM;
180 }
181
182 void release_se_kmem_caches(void)
183 {
184         destroy_workqueue(target_completion_wq);
185         kmem_cache_destroy(se_tmr_req_cache);
186         kmem_cache_destroy(se_sess_cache);
187         kmem_cache_destroy(se_ua_cache);
188         kmem_cache_destroy(t10_pr_reg_cache);
189         kmem_cache_destroy(t10_alua_lu_gp_cache);
190         kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
191         kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
192         kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
193 }
194
195 /* This code ensures unique mib indexes are handed out. */
196 static DEFINE_SPINLOCK(scsi_mib_index_lock);
197 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
198
199 /*
200  * Allocate a new row index for the entry type specified
201  */
202 u32 scsi_get_new_index(scsi_index_t type)
203 {
204         u32 new_index;
205
206         BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
207
208         spin_lock(&scsi_mib_index_lock);
209         new_index = ++scsi_mib_index[type];
210         spin_unlock(&scsi_mib_index_lock);
211
212         return new_index;
213 }
214
215 void transport_init_queue_obj(struct se_queue_obj *qobj)
216 {
217         atomic_set(&qobj->queue_cnt, 0);
218         INIT_LIST_HEAD(&qobj->qobj_list);
219         init_waitqueue_head(&qobj->thread_wq);
220         spin_lock_init(&qobj->cmd_queue_lock);
221 }
222 EXPORT_SYMBOL(transport_init_queue_obj);
223
224 void transport_subsystem_check_init(void)
225 {
226         int ret;
227
228         if (sub_api_initialized)
229                 return;
230
231         ret = request_module("target_core_iblock");
232         if (ret != 0)
233                 pr_err("Unable to load target_core_iblock\n");
234
235         ret = request_module("target_core_file");
236         if (ret != 0)
237                 pr_err("Unable to load target_core_file\n");
238
239         ret = request_module("target_core_pscsi");
240         if (ret != 0)
241                 pr_err("Unable to load target_core_pscsi\n");
242
243         ret = request_module("target_core_stgt");
244         if (ret != 0)
245                 pr_err("Unable to load target_core_stgt\n");
246
247         sub_api_initialized = 1;
248         return;
249 }
250
251 struct se_session *transport_init_session(void)
252 {
253         struct se_session *se_sess;
254
255         se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
256         if (!se_sess) {
257                 pr_err("Unable to allocate struct se_session from"
258                                 " se_sess_cache\n");
259                 return ERR_PTR(-ENOMEM);
260         }
261         INIT_LIST_HEAD(&se_sess->sess_list);
262         INIT_LIST_HEAD(&se_sess->sess_acl_list);
263         INIT_LIST_HEAD(&se_sess->sess_cmd_list);
264         INIT_LIST_HEAD(&se_sess->sess_wait_list);
265         spin_lock_init(&se_sess->sess_cmd_lock);
266
267         return se_sess;
268 }
269 EXPORT_SYMBOL(transport_init_session);
270
271 /*
272  * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
273  */
274 void __transport_register_session(
275         struct se_portal_group *se_tpg,
276         struct se_node_acl *se_nacl,
277         struct se_session *se_sess,
278         void *fabric_sess_ptr)
279 {
280         unsigned char buf[PR_REG_ISID_LEN];
281
282         se_sess->se_tpg = se_tpg;
283         se_sess->fabric_sess_ptr = fabric_sess_ptr;
284         /*
285          * Used by struct se_node_acl's under ConfigFS to locate active struct se_session.
286          *
287          * Only set for struct se_session's that will actually be moving I/O.
288          * eg: *NOT* discovery sessions.
289          */
290         if (se_nacl) {
291                 /*
292                  * If the fabric module supports an ISID based TransportID,
293                  * save this value in binary from the fabric I_T Nexus now.
294                  */
295                 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
296                         memset(&buf[0], 0, PR_REG_ISID_LEN);
297                         se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
298                                         &buf[0], PR_REG_ISID_LEN);
299                         se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
300                 }
301                 spin_lock_irq(&se_nacl->nacl_sess_lock);
302                 /*
303                  * The se_nacl->nacl_sess pointer will be set to the
304                  * last active I_T Nexus for each struct se_node_acl.
305                  */
306                 se_nacl->nacl_sess = se_sess;
307
308                 list_add_tail(&se_sess->sess_acl_list,
309                               &se_nacl->acl_sess_list);
310                 spin_unlock_irq(&se_nacl->nacl_sess_lock);
311         }
312         list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
313
314         pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
315                 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
316 }
317 EXPORT_SYMBOL(__transport_register_session);
318
319 void transport_register_session(
320         struct se_portal_group *se_tpg,
321         struct se_node_acl *se_nacl,
322         struct se_session *se_sess,
323         void *fabric_sess_ptr)
324 {
325         spin_lock_bh(&se_tpg->session_lock);
326         __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
327         spin_unlock_bh(&se_tpg->session_lock);
328 }
329 EXPORT_SYMBOL(transport_register_session);
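
/*
 * Illustrative fabric-side sketch (not part of this file): typical session
 * setup pairs the two calls above.  Names other than the transport_*_session()
 * calls are hypothetical.
 *
 *	struct se_session *se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
 */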
330
331 void transport_deregister_session_configfs(struct se_session *se_sess)
332 {
333         struct se_node_acl *se_nacl;
334         unsigned long flags;
335         /*
336          * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
337          */
338         se_nacl = se_sess->se_node_acl;
339         if (se_nacl) {
340                 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
341                 list_del(&se_sess->sess_acl_list);
342                 /*
343                  * If the session list is empty, then clear the pointer.
344                  * Otherwise, set the struct se_session pointer from the tail
345                  * element of the per struct se_node_acl active session list.
346                  */
347                 if (list_empty(&se_nacl->acl_sess_list))
348                         se_nacl->nacl_sess = NULL;
349                 else {
350                         se_nacl->nacl_sess = container_of(
351                                         se_nacl->acl_sess_list.prev,
352                                         struct se_session, sess_acl_list);
353                 }
354                 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
355         }
356 }
357 EXPORT_SYMBOL(transport_deregister_session_configfs);
358
359 void transport_free_session(struct se_session *se_sess)
360 {
361         kmem_cache_free(se_sess_cache, se_sess);
362 }
363 EXPORT_SYMBOL(transport_free_session);
364
365 void transport_deregister_session(struct se_session *se_sess)
366 {
367         struct se_portal_group *se_tpg = se_sess->se_tpg;
368         struct se_node_acl *se_nacl;
369         unsigned long flags;
370
371         if (!se_tpg) {
372                 transport_free_session(se_sess);
373                 return;
374         }
375
376         spin_lock_irqsave(&se_tpg->session_lock, flags);
377         list_del(&se_sess->sess_list);
378         se_sess->se_tpg = NULL;
379         se_sess->fabric_sess_ptr = NULL;
380         spin_unlock_irqrestore(&se_tpg->session_lock, flags);
381
382         /*
383          * Determine if we need to do extra work for this initiator node's
384          * struct se_node_acl if it had been previously dynamically generated.
385          */
386         se_nacl = se_sess->se_node_acl;
387         if (se_nacl) {
388                 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
389                 if (se_nacl->dynamic_node_acl) {
390                         if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
391                                         se_tpg)) {
392                                 list_del(&se_nacl->acl_list);
393                                 se_tpg->num_node_acls--;
394                                 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
395
396                                 core_tpg_wait_for_nacl_pr_ref(se_nacl);
397                                 core_free_device_list_for_node(se_nacl, se_tpg);
398                                 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
399                                                 se_nacl);
400                                 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
401                         }
402                 }
403                 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
404         }
405
406         transport_free_session(se_sess);
407
408         pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
409                 se_tpg->se_tpg_tfo->get_fabric_name());
410 }
411 EXPORT_SYMBOL(transport_deregister_session);
412
413 /*
414  * Called with cmd->t_state_lock held.
415  */
416 static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
417 {
418         struct se_device *dev = cmd->se_dev;
419         struct se_task *task;
420         unsigned long flags;
421
422         if (!dev)
423                 return;
424
425         list_for_each_entry(task, &cmd->t_task_list, t_list) {
426                 if (task->task_flags & TF_ACTIVE)
427                         continue;
428
429                 if (!atomic_read(&task->task_state_active))
430                         continue;
431
432                 spin_lock_irqsave(&dev->execute_task_lock, flags);
433                 list_del(&task->t_state_list);
434                 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
435                         cmd->se_tfo->get_task_tag(cmd), dev, task);
436                 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
437
438                 atomic_set(&task->task_state_active, 0);
439                 atomic_dec(&cmd->t_task_cdbs_ex_left);
440         }
441 }
442
443 /*      transport_cmd_check_stop():
444  *
445  *      'transport_off = 1' clears t_transport_active.
446  *      'transport_off = 2' additionally removes device task state and clears cmd->se_lun.
447  *
448  *      A non-zero u8 t_state sets cmd->t_state.
449  *      Returns 1 when command is stopped, else 0.
450  */
451 static int transport_cmd_check_stop(
452         struct se_cmd *cmd,
453         int transport_off,
454         u8 t_state)
455 {
456         unsigned long flags;
457
458         spin_lock_irqsave(&cmd->t_state_lock, flags);
459         /*
460          * Determine if the IOCTL context caller is requesting the stopping of this
461          * command for LUN shutdown purposes.
462          */
463         if (atomic_read(&cmd->transport_lun_stop)) {
464                 pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
465                         " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
466                         cmd->se_tfo->get_task_tag(cmd));
467
468                 atomic_set(&cmd->t_transport_active, 0);
469                 if (transport_off == 2)
470                         transport_all_task_dev_remove_state(cmd);
471                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
472
473                 complete(&cmd->transport_lun_stop_comp);
474                 return 1;
475         }
476         /*
477          * Determine if frontend context caller is requesting the stopping of
478          * this command for frontend exceptions.
479          */
480         if (atomic_read(&cmd->t_transport_stop)) {
481                 pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
482                         " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
483                         cmd->se_tfo->get_task_tag(cmd));
484
485                 if (transport_off == 2)
486                         transport_all_task_dev_remove_state(cmd);
487
488                 /*
489                  * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
490                  * to FE.
491                  */
492                 if (transport_off == 2)
493                         cmd->se_lun = NULL;
494                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
495
496                 complete(&cmd->t_transport_stop_comp);
497                 return 1;
498         }
499         if (transport_off) {
500                 atomic_set(&cmd->t_transport_active, 0);
501                 if (transport_off == 2) {
502                         transport_all_task_dev_remove_state(cmd);
503                         /*
504                          * Clear struct se_cmd->se_lun before the transport_off == 2
505                          * handoff to fabric module.
506                          */
507                         cmd->se_lun = NULL;
508                         /*
509                          * Some fabric modules like tcm_loop can release
510                          * their internally allocated I/O reference and
511                          * struct se_cmd now.
512                          *
513                          * Fabric modules are expected to return '1' here if the
514                          * se_cmd being passed is released at this point,
515                          * or zero if not being released.
516                          */
517                         if (cmd->se_tfo->check_stop_free != NULL) {
518                                 spin_unlock_irqrestore(
519                                         &cmd->t_state_lock, flags);
520
521                                 return cmd->se_tfo->check_stop_free(cmd);
522                         }
523                 }
524                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
525
526                 return 0;
527         } else if (t_state)
528                 cmd->t_state = t_state;
529         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
530
531         return 0;
532 }
533
534 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
535 {
536         return transport_cmd_check_stop(cmd, 2, 0);
537 }
538
539 static void transport_lun_remove_cmd(struct se_cmd *cmd)
540 {
541         struct se_lun *lun = cmd->se_lun;
542         unsigned long flags;
543
544         if (!lun)
545                 return;
546
547         spin_lock_irqsave(&cmd->t_state_lock, flags);
548         if (!atomic_read(&cmd->transport_dev_active)) {
549                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
550                 goto check_lun;
551         }
552         atomic_set(&cmd->transport_dev_active, 0);
553         transport_all_task_dev_remove_state(cmd);
554         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
555
556
557 check_lun:
558         spin_lock_irqsave(&lun->lun_cmd_lock, flags);
559         if (atomic_read(&cmd->transport_lun_active)) {
560                 list_del(&cmd->se_lun_node);
561                 atomic_set(&cmd->transport_lun_active, 0);
562 #if 0
563                 pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
564                         cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
565 #endif
566         }
567         spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
568 }
569
570 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
571 {
572         if (!cmd->se_tmr_req)
573                 transport_lun_remove_cmd(cmd);
574
575         if (transport_cmd_check_stop_to_fabric(cmd))
576                 return;
577         if (remove) {
578                 transport_remove_cmd_from_queue(cmd);
579                 transport_put_cmd(cmd);
580         }
581 }
582
583 static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
584                 bool at_head)
585 {
586         struct se_device *dev = cmd->se_dev;
587         struct se_queue_obj *qobj = &dev->dev_queue_obj;
588         unsigned long flags;
589
590         if (t_state) {
591                 spin_lock_irqsave(&cmd->t_state_lock, flags);
592                 cmd->t_state = t_state;
593                 atomic_set(&cmd->t_transport_active, 1);
594                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
595         }
596
597         spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
598
599         /* If the cmd is already on the list, remove it before we add it */
600         if (!list_empty(&cmd->se_queue_node))
601                 list_del(&cmd->se_queue_node);
602         else
603                 atomic_inc(&qobj->queue_cnt);
604
605         if (at_head)
606                 list_add(&cmd->se_queue_node, &qobj->qobj_list);
607         else
608                 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
609         atomic_set(&cmd->t_transport_queue_active, 1);
610         spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
611
612         wake_up_interruptible(&qobj->thread_wq);
613 }
614
615 static struct se_cmd *
616 transport_get_cmd_from_queue(struct se_queue_obj *qobj)
617 {
618         struct se_cmd *cmd;
619         unsigned long flags;
620
621         spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
622         if (list_empty(&qobj->qobj_list)) {
623                 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
624                 return NULL;
625         }
626         cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
627
628         atomic_set(&cmd->t_transport_queue_active, 0);
629
630         list_del_init(&cmd->se_queue_node);
631         atomic_dec(&qobj->queue_cnt);
632         spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
633
634         return cmd;
635 }
636
637 static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
638 {
639         struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
640         unsigned long flags;
641
642         spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
643         if (!atomic_read(&cmd->t_transport_queue_active)) {
644                 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
645                 return;
646         }
647         atomic_set(&cmd->t_transport_queue_active, 0);
648         atomic_dec(&qobj->queue_cnt);
649         list_del_init(&cmd->se_queue_node);
650         spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
651
652         if (atomic_read(&cmd->t_transport_queue_active)) {
653                 pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
654                         cmd->se_tfo->get_task_tag(cmd),
655                         atomic_read(&cmd->t_transport_queue_active));
656         }
657 }
658
659 /*
660  * Completion function used by TCM subsystem plugins (such as FILEIO)
661  * for queueing up response from struct se_subsystem_api->do_task()
662  */
663 void transport_complete_sync_cache(struct se_cmd *cmd, int good)
664 {
665         struct se_task *task = list_entry(cmd->t_task_list.next,
666                                 struct se_task, t_list);
667
668         if (good) {
669                 cmd->scsi_status = SAM_STAT_GOOD;
670                 task->task_scsi_status = GOOD;
671         } else {
672                 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
673                 task->task_se_cmd->scsi_sense_reason =
674                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
675
676         }
677
678         transport_complete_task(task, good);
679 }
680 EXPORT_SYMBOL(transport_complete_sync_cache);
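
/*
 * Illustrative backend-side sketch (an assumption about the plugin, not
 * defined in this file): a FILEIO-style SYNCHRONIZE_CACHE emulation would
 * end with something like
 *
 *	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
 *	transport_complete_sync_cache(cmd, ret == 0);
 */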
681
682 static void target_complete_failure_work(struct work_struct *work)
683 {
684         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
685
686         transport_generic_request_failure(cmd);
687 }
688
689 /*      transport_complete_task():
690  *
691  *      Called from interrupt and non-interrupt context depending
692  *      on the transport plugin.
693  */
694 void transport_complete_task(struct se_task *task, int success)
695 {
696         struct se_cmd *cmd = task->task_se_cmd;
697         struct se_device *dev = cmd->se_dev;
698         unsigned long flags;
699 #if 0
700         pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
701                         cmd->t_task_cdb[0], dev);
702 #endif
703         if (dev)
704                 atomic_inc(&dev->depth_left);
705
706         spin_lock_irqsave(&cmd->t_state_lock, flags);
707         task->task_flags &= ~TF_ACTIVE;
708
709         /*
710          * See if any sense data exists, if so set the TASK_SENSE flag.
711          * Also check for any other post completion work that needs to be
712          * done by the plugins.
713          */
714         if (dev && dev->transport->transport_complete) {
715                 if (dev->transport->transport_complete(task) != 0) {
716                         cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
717                         task->task_sense = 1;
718                         success = 1;
719                 }
720         }
721
722         /*
723          * See if we are waiting for outstanding struct se_task
724          * to complete for an exception condition
725          */
726         if (task->task_flags & TF_REQUEST_STOP) {
727                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
728                 complete(&task->task_stop_comp);
729                 return;
730         }
731
732         if (!success)
733                 cmd->t_tasks_failed = 1;
734
735         /*
736          * Decrement the outstanding t_task_cdbs_left count.  The last
737          * struct se_task from struct se_cmd will complete itself into the
738          * device queue depending upon int success.
739          */
740         if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
741                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
742                 return;
743         }
744
745         if (cmd->t_tasks_failed) {
746                 if (!task->task_error_status) {
747                         task->task_error_status =
748                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
749                         cmd->scsi_sense_reason =
750                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
751                 }
752
753                 INIT_WORK(&cmd->work, target_complete_failure_work);
754         } else {
755                 atomic_set(&cmd->t_transport_complete, 1);
756                 INIT_WORK(&cmd->work, target_complete_ok_work);
757         }
758
759         cmd->t_state = TRANSPORT_COMPLETE;
760         atomic_set(&cmd->t_transport_active, 1);
761         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
762
763         queue_work(target_completion_wq, &cmd->work);
764 }
765 EXPORT_SYMBOL(transport_complete_task);
766
767 /*
768  * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
769  * struct se_task list is ready to be added to the active execution list
770  * of a struct se_device.
771  *
772  * Called with se_dev_t->execute_task_lock held.
773  */
774 static inline int transport_add_task_check_sam_attr(
775         struct se_task *task,
776         struct se_task *task_prev,
777         struct se_device *dev)
778 {
779         /*
780          * No SAM Task attribute emulation enabled, add to tail of
781          * execution queue
782          */
783         if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
784                 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
785                 return 0;
786         }
787         /*
788          * HEAD_OF_QUEUE attribute for received CDB, which means
789          * the first task that is associated with a struct se_cmd goes to
790          * head of the struct se_device->execute_task_list, and task_prev
791          * after that for each subsequent task
792          */
793         if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
794                 list_add(&task->t_execute_list,
795                                 (task_prev != NULL) ?
796                                 &task_prev->t_execute_list :
797                                 &dev->execute_task_list);
798
799                 pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
800                                 " in execution queue\n",
801                                 task->task_se_cmd->t_task_cdb[0]);
802                 return 1;
803         }
804         /*
805          * ORDERED, SIMPLE or UNTAGGED attribute tasks are appended to the
806          * end of the struct se_device->execute_task_list once they have
807          * transitioned from the Dormant -> Active state.
808          */
809         list_add_tail(&task->t_execute_list, &dev->execute_task_list);
810         return 0;
811 }
812
813 /*      __transport_add_task_to_execute_queue():
814  *
815  *      Called with se_dev_t->execute_task_lock held.
816  */
817 static void __transport_add_task_to_execute_queue(
818         struct se_task *task,
819         struct se_task *task_prev,
820         struct se_device *dev)
821 {
822         int head_of_queue;
823
824         head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
825         atomic_inc(&dev->execute_tasks);
826
827         if (atomic_read(&task->task_state_active))
828                 return;
829         /*
830          * Determine if this task needs to go to HEAD_OF_QUEUE for the
831          * state list as well.  Running with SAM Task Attribute emulation
832          * will always return head_of_queue == 0 here
833          */
834         if (head_of_queue)
835                 list_add(&task->t_state_list, (task_prev) ?
836                                 &task_prev->t_state_list :
837                                 &dev->state_task_list);
838         else
839                 list_add_tail(&task->t_state_list, &dev->state_task_list);
840
841         atomic_set(&task->task_state_active, 1);
842
843         pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
844                 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
845                 task, dev);
846 }
847
848 static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
849 {
850         struct se_device *dev = cmd->se_dev;
851         struct se_task *task;
852         unsigned long flags;
853
854         spin_lock_irqsave(&cmd->t_state_lock, flags);
855         list_for_each_entry(task, &cmd->t_task_list, t_list) {
856                 if (atomic_read(&task->task_state_active))
857                         continue;
858
859                 spin_lock(&dev->execute_task_lock);
860                 list_add_tail(&task->t_state_list, &dev->state_task_list);
861                 atomic_set(&task->task_state_active, 1);
862
863                 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
864                         task->task_se_cmd->se_tfo->get_task_tag(
865                         task->task_se_cmd), task, dev);
866
867                 spin_unlock(&dev->execute_task_lock);
868         }
869         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
870 }
871
872 static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
873 {
874         struct se_device *dev = cmd->se_dev;
875         struct se_task *task, *task_prev = NULL;
876         unsigned long flags;
877
878         spin_lock_irqsave(&dev->execute_task_lock, flags);
879         list_for_each_entry(task, &cmd->t_task_list, t_list) {
880                 if (!list_empty(&task->t_execute_list))
881                         continue;
882                 /*
883                  * __transport_add_task_to_execute_queue() handles the
884                  * SAM Task Attribute emulation if enabled
885                  */
886                 __transport_add_task_to_execute_queue(task, task_prev, dev);
887                 task_prev = task;
888         }
889         spin_unlock_irqrestore(&dev->execute_task_lock, flags);
890 }
891
892 void __transport_remove_task_from_execute_queue(struct se_task *task,
893                 struct se_device *dev)
894 {
895         list_del_init(&task->t_execute_list);
896         atomic_dec(&dev->execute_tasks);
897 }
898
899 void transport_remove_task_from_execute_queue(
900         struct se_task *task,
901         struct se_device *dev)
902 {
903         unsigned long flags;
904
905         if (WARN_ON(list_empty(&task->t_execute_list)))
906                 return;
907
908         spin_lock_irqsave(&dev->execute_task_lock, flags);
909         __transport_remove_task_from_execute_queue(task, dev);
910         spin_unlock_irqrestore(&dev->execute_task_lock, flags);
911 }
912
913 /*
914  * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
915  */
916
917 static void target_qf_do_work(struct work_struct *work)
918 {
919         struct se_device *dev = container_of(work, struct se_device,
920                                         qf_work_queue);
921         LIST_HEAD(qf_cmd_list);
922         struct se_cmd *cmd, *cmd_tmp;
923
924         spin_lock_irq(&dev->qf_cmd_lock);
925         list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
926         spin_unlock_irq(&dev->qf_cmd_lock);
927
928         list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
929                 list_del(&cmd->se_qf_node);
930                 atomic_dec(&dev->dev_qf_count);
931                 smp_mb__after_atomic_dec();
932
933                 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
934                         " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
935                         (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
936                         (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
937                         : "UNKNOWN");
938
939                 transport_add_cmd_to_queue(cmd, cmd->t_state, true);
940         }
941 }
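
/*
 * Commands reach dev->qf_cmd_list via transport_handle_queue_full() when a
 * fabric callback signals QUEUE_FULL (-EAGAIN or -ENOMEM); the worker above
 * requeues each one at the head of the processing queue in its saved t_state.
 */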
942
943 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
944 {
945         switch (cmd->data_direction) {
946         case DMA_NONE:
947                 return "NONE";
948         case DMA_FROM_DEVICE:
949                 return "READ";
950         case DMA_TO_DEVICE:
951                 return "WRITE";
952         case DMA_BIDIRECTIONAL:
953                 return "BIDI";
954         default:
955                 break;
956         }
957
958         return "UNKNOWN";
959 }
960
961 void transport_dump_dev_state(
962         struct se_device *dev,
963         char *b,
964         int *bl)
965 {
966         *bl += sprintf(b + *bl, "Status: ");
967         switch (dev->dev_status) {
968         case TRANSPORT_DEVICE_ACTIVATED:
969                 *bl += sprintf(b + *bl, "ACTIVATED");
970                 break;
971         case TRANSPORT_DEVICE_DEACTIVATED:
972                 *bl += sprintf(b + *bl, "DEACTIVATED");
973                 break;
974         case TRANSPORT_DEVICE_SHUTDOWN:
975                 *bl += sprintf(b + *bl, "SHUTDOWN");
976                 break;
977         case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
978         case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
979                 *bl += sprintf(b + *bl, "OFFLINE");
980                 break;
981         default:
982                 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
983                 break;
984         }
985
986         *bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
987                 atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
988                 dev->queue_depth);
989         *bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
990                 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
991         *bl += sprintf(b + *bl, "        ");
992 }
993
994 void transport_dump_vpd_proto_id(
995         struct t10_vpd *vpd,
996         unsigned char *p_buf,
997         int p_buf_len)
998 {
999         unsigned char buf[VPD_TMP_BUF_SIZE];
1000         int len;
1001
1002         memset(buf, 0, VPD_TMP_BUF_SIZE);
1003         len = sprintf(buf, "T10 VPD Protocol Identifier: ");
1004
1005         switch (vpd->protocol_identifier) {
1006         case 0x00:
1007                 sprintf(buf+len, "Fibre Channel\n");
1008                 break;
1009         case 0x10:
1010                 sprintf(buf+len, "Parallel SCSI\n");
1011                 break;
1012         case 0x20:
1013                 sprintf(buf+len, "SSA\n");
1014                 break;
1015         case 0x30:
1016                 sprintf(buf+len, "IEEE 1394\n");
1017                 break;
1018         case 0x40:
1019                 sprintf(buf+len, "SCSI Remote Direct Memory Access"
1020                                 " Protocol\n");
1021                 break;
1022         case 0x50:
1023                 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1024                 break;
1025         case 0x60:
1026                 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1027                 break;
1028         case 0x70:
1029                 sprintf(buf+len, "Automation/Drive Interface Transport"
1030                                 " Protocol\n");
1031                 break;
1032         case 0x80:
1033                 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1034                 break;
1035         default:
1036                 sprintf(buf+len, "Unknown 0x%02x\n",
1037                                 vpd->protocol_identifier);
1038                 break;
1039         }
1040
1041         if (p_buf)
1042                 strncpy(p_buf, buf, p_buf_len);
1043         else
1044                 pr_debug("%s", buf);
1045 }
1046
1047 void
1048 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1049 {
1050         /*
1051          * Check if the Protocol Identifier Valid (PIV) bit is set..
1052          *
1053          * from spc3r23.pdf section 7.5.1
1054          */
1055          if (page_83[1] & 0x80) {
1056                 vpd->protocol_identifier = (page_83[0] & 0xf0);
1057                 vpd->protocol_identifier_set = 1;
1058                 transport_dump_vpd_proto_id(vpd, NULL, 0);
1059         }
1060 }
1061 EXPORT_SYMBOL(transport_set_vpd_proto_id);
1062
1063 int transport_dump_vpd_assoc(
1064         struct t10_vpd *vpd,
1065         unsigned char *p_buf,
1066         int p_buf_len)
1067 {
1068         unsigned char buf[VPD_TMP_BUF_SIZE];
1069         int ret = 0;
1070         int len;
1071
1072         memset(buf, 0, VPD_TMP_BUF_SIZE);
1073         len = sprintf(buf, "T10 VPD Identifier Association: ");
1074
1075         switch (vpd->association) {
1076         case 0x00:
1077                 sprintf(buf+len, "addressed logical unit\n");
1078                 break;
1079         case 0x10:
1080                 sprintf(buf+len, "target port\n");
1081                 break;
1082         case 0x20:
1083                 sprintf(buf+len, "SCSI target device\n");
1084                 break;
1085         default:
1086                 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1087                 ret = -EINVAL;
1088                 break;
1089         }
1090
1091         if (p_buf)
1092                 strncpy(p_buf, buf, p_buf_len);
1093         else
1094                 pr_debug("%s", buf);
1095
1096         return ret;
1097 }
1098
1099 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1100 {
1101         /*
1102          * The VPD identification association..
1103          *
1104          * from spc3r23.pdf Section 7.6.3.1 Table 297
1105          */
1106         vpd->association = (page_83[1] & 0x30);
1107         return transport_dump_vpd_assoc(vpd, NULL, 0);
1108 }
1109 EXPORT_SYMBOL(transport_set_vpd_assoc);
1110
1111 int transport_dump_vpd_ident_type(
1112         struct t10_vpd *vpd,
1113         unsigned char *p_buf,
1114         int p_buf_len)
1115 {
1116         unsigned char buf[VPD_TMP_BUF_SIZE];
1117         int ret = 0;
1118         int len;
1119
1120         memset(buf, 0, VPD_TMP_BUF_SIZE);
1121         len = sprintf(buf, "T10 VPD Identifier Type: ");
1122
1123         switch (vpd->device_identifier_type) {
1124         case 0x00:
1125                 sprintf(buf+len, "Vendor specific\n");
1126                 break;
1127         case 0x01:
1128                 sprintf(buf+len, "T10 Vendor ID based\n");
1129                 break;
1130         case 0x02:
1131                 sprintf(buf+len, "EUI-64 based\n");
1132                 break;
1133         case 0x03:
1134                 sprintf(buf+len, "NAA\n");
1135                 break;
1136         case 0x04:
1137                 sprintf(buf+len, "Relative target port identifier\n");
1138                 break;
1139         case 0x08:
1140                 sprintf(buf+len, "SCSI name string\n");
1141                 break;
1142         default:
1143                 sprintf(buf+len, "Unsupported: 0x%02x\n",
1144                                 vpd->device_identifier_type);
1145                 ret = -EINVAL;
1146                 break;
1147         }
1148
1149         if (p_buf) {
1150                 if (p_buf_len < strlen(buf)+1)
1151                         return -EINVAL;
1152                 strncpy(p_buf, buf, p_buf_len);
1153         } else {
1154                 pr_debug("%s", buf);
1155         }
1156
1157         return ret;
1158 }
1159
1160 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1161 {
1162         /*
1163          * The VPD identifier type..
1164          *
1165          * from spc3r23.pdf Section 7.6.3.1 Table 298
1166          */
1167         vpd->device_identifier_type = (page_83[1] & 0x0f);
1168         return transport_dump_vpd_ident_type(vpd, NULL, 0);
1169 }
1170 EXPORT_SYMBOL(transport_set_vpd_ident_type);
1171
1172 int transport_dump_vpd_ident(
1173         struct t10_vpd *vpd,
1174         unsigned char *p_buf,
1175         int p_buf_len)
1176 {
1177         unsigned char buf[VPD_TMP_BUF_SIZE];
1178         int ret = 0;
1179
1180         memset(buf, 0, VPD_TMP_BUF_SIZE);
1181
1182         switch (vpd->device_identifier_code_set) {
1183         case 0x01: /* Binary */
1184                 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1185                         &vpd->device_identifier[0]);
1186                 break;
1187         case 0x02: /* ASCII */
1188                 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1189                         &vpd->device_identifier[0]);
1190                 break;
1191         case 0x03: /* UTF-8 */
1192                 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1193                         &vpd->device_identifier[0]);
1194                 break;
1195         default:
1196                 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1197                         " 0x%02x", vpd->device_identifier_code_set);
1198                 ret = -EINVAL;
1199                 break;
1200         }
1201
1202         if (p_buf)
1203                 strncpy(p_buf, buf, p_buf_len);
1204         else
1205                 pr_debug("%s", buf);
1206
1207         return ret;
1208 }
1209
1210 int
1211 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1212 {
1213         static const char hex_str[] = "0123456789abcdef";
1214         int j = 0, i = 4; /* offset to start of the identifier */
1215
1216         /*
1217          * The VPD Code Set (encoding)
1218          *
1219          * from spc3r23.pdf Section 7.6.3.1 Table 296
1220          */
1221         vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1222         switch (vpd->device_identifier_code_set) {
1223         case 0x01: /* Binary */
1224                 vpd->device_identifier[j++] =
1225                                 hex_str[vpd->device_identifier_type];
1226                 while (i < (4 + page_83[3])) {
1227                         vpd->device_identifier[j++] =
1228                                 hex_str[(page_83[i] & 0xf0) >> 4];
1229                         vpd->device_identifier[j++] =
1230                                 hex_str[page_83[i] & 0x0f];
1231                         i++;
1232                 }
1233                 break;
1234         case 0x02: /* ASCII */
1235         case 0x03: /* UTF-8 */
1236                 while (i < (4 + page_83[3]))
1237                         vpd->device_identifier[j++] = page_83[i++];
1238                 break;
1239         default:
1240                 break;
1241         }
1242
1243         return transport_dump_vpd_ident(vpd, NULL, 0);
1244 }
1245 EXPORT_SYMBOL(transport_set_vpd_ident);
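
/*
 * Illustrative sketch: decoding one INQUIRY EVPD 0x83 designator with the
 * helpers above, where page_83 points at the designator header (the
 * surrounding parse loop is hypothetical):
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */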
1246
1247 static void core_setup_task_attr_emulation(struct se_device *dev)
1248 {
1249         /*
1250          * If this device is from Target_Core_Mod/pSCSI, disable the
1251          * SAM Task Attribute emulation.
1252          *
1253          * This is currently not available in upstream Linux/SCSI Target
1254          * mode code, and is assumed to be disabled while using TCM/pSCSI.
1255          */
1256         if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1257                 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1258                 return;
1259         }
1260
1261         dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1262         pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1263                 " device\n", dev->transport->name,
1264                 dev->transport->get_device_rev(dev));
1265 }
1266
1267 static void scsi_dump_inquiry(struct se_device *dev)
1268 {
1269         struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
1270         int i, device_type;
1271         /*
1272          * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1273          */
1274         pr_debug("  Vendor: ");
1275         for (i = 0; i < 8; i++)
1276                 if (wwn->vendor[i] >= 0x20)
1277                         pr_debug("%c", wwn->vendor[i]);
1278                 else
1279                         pr_debug(" ");
1280
1281         pr_debug("  Model: ");
1282         for (i = 0; i < 16; i++)
1283                 if (wwn->model[i] >= 0x20)
1284                         pr_debug("%c", wwn->model[i]);
1285                 else
1286                         pr_debug(" ");
1287
1288         pr_debug("  Revision: ");
1289         for (i = 0; i < 4; i++)
1290                 if (wwn->revision[i] >= 0x20)
1291                         pr_debug("%c", wwn->revision[i]);
1292                 else
1293                         pr_debug(" ");
1294
1295         pr_debug("\n");
1296
1297         device_type = dev->transport->get_device_type(dev);
1298         pr_debug("  Type:   %s ", scsi_device_type(device_type));
1299         pr_debug("                 ANSI SCSI revision: %02x\n",
1300                                 dev->transport->get_device_rev(dev));
1301 }
1302
1303 struct se_device *transport_add_device_to_core_hba(
1304         struct se_hba *hba,
1305         struct se_subsystem_api *transport,
1306         struct se_subsystem_dev *se_dev,
1307         u32 device_flags,
1308         void *transport_dev,
1309         struct se_dev_limits *dev_limits,
1310         const char *inquiry_prod,
1311         const char *inquiry_rev)
1312 {
1313         int force_pt;
1314         struct se_device  *dev;
1315
1316         dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1317         if (!dev) {
1318                 pr_err("Unable to allocate memory for struct se_device\n");
1319                 return NULL;
1320         }
1321
1322         transport_init_queue_obj(&dev->dev_queue_obj);
1323         dev->dev_flags          = device_flags;
1324         dev->dev_status         |= TRANSPORT_DEVICE_DEACTIVATED;
1325         dev->dev_ptr            = transport_dev;
1326         dev->se_hba             = hba;
1327         dev->se_sub_dev         = se_dev;
1328         dev->transport          = transport;
1329         atomic_set(&dev->active_cmds, 0);
1330         INIT_LIST_HEAD(&dev->dev_list);
1331         INIT_LIST_HEAD(&dev->dev_sep_list);
1332         INIT_LIST_HEAD(&dev->dev_tmr_list);
1333         INIT_LIST_HEAD(&dev->execute_task_list);
1334         INIT_LIST_HEAD(&dev->delayed_cmd_list);
1335         INIT_LIST_HEAD(&dev->state_task_list);
1336         INIT_LIST_HEAD(&dev->qf_cmd_list);
1337         spin_lock_init(&dev->execute_task_lock);
1338         spin_lock_init(&dev->delayed_cmd_lock);
1339         spin_lock_init(&dev->state_task_lock);
1340         spin_lock_init(&dev->dev_alua_lock);
1341         spin_lock_init(&dev->dev_reservation_lock);
1342         spin_lock_init(&dev->dev_status_lock);
1343         spin_lock_init(&dev->dev_status_thr_lock);
1344         spin_lock_init(&dev->se_port_lock);
1345         spin_lock_init(&dev->se_tmr_lock);
1346         spin_lock_init(&dev->qf_cmd_lock);
1347
1348         dev->queue_depth        = dev_limits->queue_depth;
1349         atomic_set(&dev->depth_left, dev->queue_depth);
1350         atomic_set(&dev->dev_ordered_id, 0);
1351
1352         se_dev_set_default_attribs(dev, dev_limits);
1353
1354         dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1355         dev->creation_time = get_jiffies_64();
1356         spin_lock_init(&dev->stats_lock);
1357
1358         spin_lock(&hba->device_lock);
1359         list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1360         hba->dev_count++;
1361         spin_unlock(&hba->device_lock);
1362         /*
1363          * Setup the SAM Task Attribute emulation for struct se_device
1364          */
1365         core_setup_task_attr_emulation(dev);
1366         /*
1367          * Force PR and ALUA passthrough emulation with internal object use.
1368          */
1369         force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1370         /*
1371          * Setup the Reservations infrastructure for struct se_device
1372          */
1373         core_setup_reservations(dev, force_pt);
1374         /*
1375          * Setup the Asymmetric Logical Unit Assignment for struct se_device
1376          */
1377         if (core_setup_alua(dev, force_pt) < 0)
1378                 goto out;
1379
1380         /*
1381          * Startup the struct se_device processing thread
1382          */
1383         dev->process_thread = kthread_run(transport_processing_thread, dev,
1384                                           "LIO_%s", dev->transport->name);
1385         if (IS_ERR(dev->process_thread)) {
1386                 pr_err("Unable to create kthread: LIO_%s\n",
1387                         dev->transport->name);
1388                 goto out;
1389         }
1390         /*
1391          * Setup work_queue for QUEUE_FULL
1392          */
1393         INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1394         /*
1395          * Preload the initial INQUIRY const values if we are doing
1396          * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1397          * passthrough because this is being provided by the backend LLD.
1398          * This is required so that transport_get_inquiry() copies these
1399          * originals once back into DEV_T10_WWN(dev) for the virtual device
1400          * setup.
1401          */
1402         if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1403                 if (!inquiry_prod || !inquiry_rev) {
1404                         pr_err("All non TCM/pSCSI plugins require"
1405                                 " INQUIRY consts\n");
1406                         goto out;
1407                 }
1408
1409                 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1410                 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1411                 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
1412         }
1413         scsi_dump_inquiry(dev);
1414
1415         return dev;
1416 out:
1417         /* May be NULL or ERR_PTR if an earlier setup step failed */
             if (!IS_ERR_OR_NULL(dev->process_thread))
                     kthread_stop(dev->process_thread);
1418
1419         spin_lock(&hba->device_lock);
1420         list_del(&dev->dev_list);
1421         hba->dev_count--;
1422         spin_unlock(&hba->device_lock);
1423
1424         se_release_vpd_for_dev(dev);
1425
1426         kfree(dev);
1427
1428         return NULL;
1429 }
1430 EXPORT_SYMBOL(transport_add_device_to_core_hba);
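
/*
 * Illustrative backend-side sketch (names modeled on an IBLOCK-style plugin
 * and not defined in this file): subsystem plugins call this from their
 * ->create_virtdevice() method, e.g.
 *
 *	dev = transport_add_device_to_core_hba(hba, &iblock_template,
 *			se_dev, dev_flags, ib_dev, &dev_limits,
 *			"IBLOCK", IBLOCK_VERSION);
 */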
1431
1432 /*      transport_generic_prepare_cdb():
1433  *
1434  *      Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1435  *      contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1436  *      Because we map iSCSI LUNs to SCSI Target IDs, a non-zero LUN in
1437  *      the CDB would confuse the devices and HBAs, so it is cleared for
1438  *      CDBs that do not use those bits.
1439  */
1440 static inline void transport_generic_prepare_cdb(
1441         unsigned char *cdb)
1442 {
1443         switch (cdb[0]) {
1444         case READ_10: /* SBC - RDProtect */
1445         case READ_12: /* SBC - RDProtect */
1446         case READ_16: /* SBC - RDProtect */
1447         case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1448         case VERIFY: /* SBC - VRProtect */
1449         case VERIFY_16: /* SBC - VRProtect */
1450         case WRITE_VERIFY: /* SBC - VRProtect */
1451         case WRITE_VERIFY_12: /* SBC - VRProtect */
1452                 break;
1453         default:
1454                 cdb[1] &= 0x1f; /* clear logical unit number */
1455                 break;
1456         }
1457 }
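/*
 * Worked example (illustrative, not from the original source): a legacy
 * initiator sending TEST_UNIT_READY to LUN 2 encodes the LUN in bits 7-5
 * of byte 1, so cdb[1] arrives as 0x40.  The default case above masks it
 * with 0x1f, yielding 0x00; only the listed opcodes keep byte 1 intact
 * because they overload those bits (e.g. RDProtect/VRProtect).
 */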
1458
1459 static struct se_task *
1460 transport_generic_get_task(struct se_cmd *cmd,
1461                 enum dma_data_direction data_direction)
1462 {
1463         struct se_task *task;
1464         struct se_device *dev = cmd->se_dev;
1465
1466         task = dev->transport->alloc_task(cmd->t_task_cdb);
1467         if (!task) {
1468                 pr_err("Unable to allocate struct se_task\n");
1469                 return NULL;
1470         }
1471
1472         INIT_LIST_HEAD(&task->t_list);
1473         INIT_LIST_HEAD(&task->t_execute_list);
1474         INIT_LIST_HEAD(&task->t_state_list);
1475         init_completion(&task->task_stop_comp);
1476         task->task_se_cmd = cmd;
1477         task->task_data_direction = data_direction;
1478
1479         return task;
1480 }
1481
1482 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1483
1484 /*
1485  * Used by fabric modules containing a local struct se_cmd within their
1486  * fabric dependent per I/O descriptor.
1487  */
1488 void transport_init_se_cmd(
1489         struct se_cmd *cmd,
1490         struct target_core_fabric_ops *tfo,
1491         struct se_session *se_sess,
1492         u32 data_length,
1493         int data_direction,
1494         int task_attr,
1495         unsigned char *sense_buffer)
1496 {
1497         INIT_LIST_HEAD(&cmd->se_lun_node);
1498         INIT_LIST_HEAD(&cmd->se_delayed_node);
1499         INIT_LIST_HEAD(&cmd->se_qf_node);
1500         INIT_LIST_HEAD(&cmd->se_queue_node);
1501         INIT_LIST_HEAD(&cmd->se_cmd_list);
1502         INIT_LIST_HEAD(&cmd->t_task_list);
1503         init_completion(&cmd->transport_lun_fe_stop_comp);
1504         init_completion(&cmd->transport_lun_stop_comp);
1505         init_completion(&cmd->t_transport_stop_comp);
1506         init_completion(&cmd->cmd_wait_comp);
1507         spin_lock_init(&cmd->t_state_lock);
1508         atomic_set(&cmd->transport_dev_active, 1);
1509
1510         cmd->se_tfo = tfo;
1511         cmd->se_sess = se_sess;
1512         cmd->data_length = data_length;
1513         cmd->data_direction = data_direction;
1514         cmd->sam_task_attr = task_attr;
1515         cmd->sense_buffer = sense_buffer;
1516 }
1517 EXPORT_SYMBOL(transport_init_se_cmd);
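#if 0
/*
 * Minimal sketch of a fabric module caller (hypothetical names: struct
 * my_fabric_cmd, my_fabric_tfo, my_sense_buffer).  It shows the intended
 * usage only and is not part of this file.
 */
struct my_fabric_cmd {
	struct se_cmd se_cmd;	/* local struct se_cmd descriptor */
	/* ... fabric dependent per I/O state ... */
};

static void my_fabric_init_cmd(struct my_fabric_cmd *fcmd,
	struct se_session *se_sess, u32 length)
{
	transport_init_se_cmd(&fcmd->se_cmd, &my_fabric_tfo, se_sess,
			length, DMA_FROM_DEVICE, MSG_SIMPLE_TAG,
			&my_sense_buffer[0]);
}
#endif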
1518
1519 static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1520 {
1521         /*
1522          * Check if SAM Task Attribute emulation is enabled for this
1523          * struct se_device storage object
1524          */
1525         if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1526                 return 0;
1527
1528         if (cmd->sam_task_attr == MSG_ACA_TAG) {
1529                 pr_debug("SAM Task Attribute ACA"
1530                         " emulation is not supported\n");
1531                 return -EINVAL;
1532         }
1533         /*
1534          * Used to determine when ORDERED commands should go from
1535          * Dormant to Active status.
1536          */
1537         cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1538         smp_mb__after_atomic_inc();
1539         pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1540                         cmd->se_ordered_id, cmd->sam_task_attr,
1541                         cmd->se_dev->transport->name);
1542         return 0;
1543 }
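/*
 * Note (illustrative, not from the original source): se_ordered_id is a
 * per struct se_device counter that increases monotonically for every
 * command seen under SAM Task Attribute emulation; it is referenced by
 * the pr_debug() ordering messages in this file.
 */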
1544
1545 /*      transport_generic_allocate_tasks():
1546  *
1547  *      Called from fabric RX Thread.
1548  */
1549 int transport_generic_allocate_tasks(
1550         struct se_cmd *cmd,
1551         unsigned char *cdb)
1552 {
1553         int ret;
1554
1555         transport_generic_prepare_cdb(cdb);
1556         /*
1557          * Ensure that the received CDB does not exceed the max (252 + 8) bytes
1558          * for VARIABLE_LENGTH_CMD
1559          */
1560         if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1561                 pr_err("Received SCSI CDB with command_size: %d that"
1562                         " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1563                         scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1564                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1565                 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1566                 return -EINVAL;
1567         }
1568         /*
1569          * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1570          * allocate the additional extended CDB buffer now.  Otherwise
1571          * set up the pointer from __t_task_cdb to t_task_cdb.
1572          */
1573         if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1574                 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1575                                                 GFP_KERNEL);
1576                 if (!cmd->t_task_cdb) {
1577                         pr_err("Unable to allocate cmd->t_task_cdb"
1578                                 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1579                                 scsi_command_size(cdb),
1580                                 (unsigned long)sizeof(cmd->__t_task_cdb));
1581                         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1582                         cmd->scsi_sense_reason =
1583                                         TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1584                         return -ENOMEM;
1585                 }
1586         } else
1587                 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1588         /*
1589          * Copy the original CDB into cmd->t_task_cdb.
1590          */
1591         memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1592         /*
1593          * Setup the received CDB based on SCSI defined opcodes and
1594          * perform unit attention, persistent reservations and ALUA
1595          * checks for virtual device backends.  The cmd->t_task_cdb
1596          * pointer is expected to be setup before we reach this point.
1597          */
1598         ret = transport_generic_cmd_sequencer(cmd, cdb);
1599         if (ret < 0)
1600                 return ret;
1601         /*
1602          * Check for SAM Task Attribute Emulation
1603          */
1604         if (transport_check_alloc_task_attr(cmd) < 0) {
1605                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1606                 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1607                 return -EINVAL;
1608         }
1609         spin_lock(&cmd->se_lun->lun_sep_lock);
1610         if (cmd->se_lun->lun_sep)
1611                 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1612         spin_unlock(&cmd->se_lun->lun_sep_lock);
1613         return 0;
1614 }
1615 EXPORT_SYMBOL(transport_generic_allocate_tasks);
1616
1617 /*
1618  * Used by fabric module frontends to queue tasks directly.
1619  * May only be used from process context.
1620  */
1621 int transport_handle_cdb_direct(
1622         struct se_cmd *cmd)
1623 {
1624         int ret;
1625
1626         if (!cmd->se_lun) {
1627                 dump_stack();
1628                 pr_err("cmd->se_lun is NULL\n");
1629                 return -EINVAL;
1630         }
1631         if (in_interrupt()) {
1632                 dump_stack();
1633                 pr_err("transport_generic_handle_cdb cannot be called"
1634                                 " from interrupt context\n");
1635                 return -EINVAL;
1636         }
1637         /*
1638          * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
1639          * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1640          * in existing usage to ensure that outstanding descriptors are handled
1641          * correctly during shutdown via transport_wait_for_tasks()
1642          *
1643          * Also, we don't take cmd->t_state_lock here as we only expect
1644          * this to be called for initial descriptor submission.
1645          */
1646         cmd->t_state = TRANSPORT_NEW_CMD;
1647         atomic_set(&cmd->t_transport_active, 1);
1648         /*
1649          * transport_generic_new_cmd() is already handling QUEUE_FULL,
1650          * so follow TRANSPORT_NEW_CMD processing thread context usage
1651          * and call transport_generic_request_failure() if necessary..
1652          */
1653         ret = transport_generic_new_cmd(cmd);
1654         if (ret < 0)
1655                 transport_generic_request_failure(cmd);
1656
1657         return 0;
1658 }
1659 EXPORT_SYMBOL(transport_handle_cdb_direct);
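#if 0
/*
 * Minimal submission sketch (hypothetical fabric code, not compiled
 * here): after transport_init_se_cmd() and with cmd->se_lun set, the
 * fabric maps the received CDB and submits from process context.
 */
static void my_fabric_submit_cmd(struct se_cmd *se_cmd, unsigned char *cdb)
{
	if (transport_generic_allocate_tasks(se_cmd, cdb) < 0) {
		/* the sequencer already set cmd->scsi_sense_reason */
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		return;
	}
	transport_handle_cdb_direct(se_cmd);
}
#endif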
1660
1661 /*
1662  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1663  * to queue up a newly initialized se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1664  * complete setup in TCM process context w/ TFO->new_cmd_map().
1665  */
1666 int transport_generic_handle_cdb_map(
1667         struct se_cmd *cmd)
1668 {
1669         if (!cmd->se_lun) {
1670                 dump_stack();
1671                 pr_err("cmd->se_lun is NULL\n");
1672                 return -EINVAL;
1673         }
1674
1675         transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
1676         return 0;
1677 }
1678 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1679
1680 /*      transport_generic_handle_data():
1681  *
1682  *
1683  */
1684 int transport_generic_handle_data(
1685         struct se_cmd *cmd)
1686 {
1687         /*
1688          * For the software fabric case, we assume the nexus is being
1689          * failed/shutdown when signals are pending from the kthread context
1690          * caller, so we return a failure.  For the HW target mode case running
1691          * in interrupt code, the signal_pending() check is skipped.
1692          */
1693         if (!in_interrupt() && signal_pending(current))
1694                 return -EPERM;
1695         /*
1696          * If the received CDB has already been ABORTED by the generic
1697          * target engine, we now call transport_check_aborted_status()
1698          * to queue any delayed TASK_ABORTED status for the received CDB to the
1699          * fabric module as we are expecting no further incoming DATA OUT
1700          * sequences at this point.
1701          */
1702         if (transport_check_aborted_status(cmd, 1) != 0)
1703                 return 0;
1704
1705         transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
1706         return 0;
1707 }
1708 EXPORT_SYMBOL(transport_generic_handle_data);
1709
1710 /*      transport_generic_handle_tmr():
1711  *
1712  *
1713  */
1714 int transport_generic_handle_tmr(
1715         struct se_cmd *cmd)
1716 {
1717         transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
1718         return 0;
1719 }
1720 EXPORT_SYMBOL(transport_generic_handle_tmr);
1721
1722 /*
1723  * If the task is active, request it to be stopped and sleep until it
1724  * has completed.
1725  */
1726 bool target_stop_task(struct se_task *task, unsigned long *flags)
1727 {
1728         struct se_cmd *cmd = task->task_se_cmd;
1729         bool was_active = false;
1730
1731         if (task->task_flags & TF_ACTIVE) {
1732                 task->task_flags |= TF_REQUEST_STOP;
1733                 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1734
1735                 pr_debug("Task %p waiting to complete\n", task);
1736                 wait_for_completion(&task->task_stop_comp);
1737                 pr_debug("Task %p stopped successfully\n", task);
1738
1739                 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1740                 atomic_dec(&cmd->t_task_cdbs_left);
1741                 task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
1742                 was_active = true;
1743         }
1744
1745         return was_active;
1746 }
1747
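/*
 * Walk the command's task list: tasks that are neither active nor sent
 * are removed from the execution queue, active tasks are stopped via
 * target_stop_task().  Returns the number of tasks that target_stop_task()
 * could not act upon (already sent but no longer active).
 */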
1748 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1749 {
1750         struct se_task *task, *task_tmp;
1751         unsigned long flags;
1752         int ret = 0;
1753
1754         pr_debug("ITT[0x%08x] - Stopping tasks\n",
1755                 cmd->se_tfo->get_task_tag(cmd));
1756
1757         /*
1758          * No tasks remain in the execution queue
1759          */
1760         spin_lock_irqsave(&cmd->t_state_lock, flags);
1761         list_for_each_entry_safe(task, task_tmp,
1762                                 &cmd->t_task_list, t_list) {
1763                 pr_debug("Processing task %p\n", task);
1764                 /*
1765                  * If the struct se_task has not been sent and is not active,
1766                  * remove the struct se_task from the execution queue.
1767                  */
1768                 if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
1769                         spin_unlock_irqrestore(&cmd->t_state_lock,
1770                                         flags);
1771                         transport_remove_task_from_execute_queue(task,
1772                                         cmd->se_dev);
1773
1774                         pr_debug("Task %p removed from execute queue\n", task);
1775                         spin_lock_irqsave(&cmd->t_state_lock, flags);
1776                         continue;
1777                 }
1778
1779                 if (!target_stop_task(task, &flags)) {
1780                         pr_debug("Task %p - did nothing\n", task);
1781                         ret++;
1782                 }
1783         }
1784         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1785
1786         return ret;
1787 }
1788
1789 /*
1790  * Handle SAM-esque emulation for generic transport request failures.
1791  */
1792 static void transport_generic_request_failure(struct se_cmd *cmd)
1793 {
1794         int ret = 0;
1795
1796         pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1797                 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1798                 cmd->t_task_cdb[0]);
1799         pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
1800                 cmd->se_tfo->get_cmd_state(cmd),
1801                 cmd->t_state, cmd->scsi_sense_reason);
1802         pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
1803                 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1804                 " t_transport_active: %d t_transport_stop: %d"
1805                 " t_transport_sent: %d\n", cmd->t_task_list_num,
1806                 atomic_read(&cmd->t_task_cdbs_left),
1807                 atomic_read(&cmd->t_task_cdbs_sent),
1808                 atomic_read(&cmd->t_task_cdbs_ex_left),
1809                 atomic_read(&cmd->t_transport_active),
1810                 atomic_read(&cmd->t_transport_stop),
1811                 atomic_read(&cmd->t_transport_sent));
1812
1813         /*
1814          * Complete SAM Task Attribute emulation for the failed struct se_cmd
1815          */
1816         if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1817                 transport_complete_task_attr(cmd);
1818
1819         switch (cmd->scsi_sense_reason) {
1820         case TCM_NON_EXISTENT_LUN:
1821         case TCM_UNSUPPORTED_SCSI_OPCODE:
1822         case TCM_INVALID_CDB_FIELD:
1823         case TCM_INVALID_PARAMETER_LIST:
1824         case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1825         case TCM_UNKNOWN_MODE_PAGE:
1826         case TCM_WRITE_PROTECTED:
1827         case TCM_CHECK_CONDITION_ABORT_CMD:
1828         case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1829         case TCM_CHECK_CONDITION_NOT_READY:
1830                 break;
1831         case TCM_RESERVATION_CONFLICT:
1832                 /*
1833                  * No SENSE Data payload for this case, set SCSI Status
1834                  * and queue the response to $FABRIC_MOD.
1835                  *
1836                  * Uses linux/include/scsi/scsi.h SAM status codes defs
1837                  */
1838                 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1839                 /*
1840                  * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1841                  * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1842                  * CONFLICT STATUS.
1843                  *
1844                  * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1845                  */
1846                 if (cmd->se_sess &&
1847                     cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
1848                         core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
1849                                 cmd->orig_fe_lun, 0x2C,
1850                                 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1851
1852                 ret = cmd->se_tfo->queue_status(cmd);
1853                 if (ret == -EAGAIN || ret == -ENOMEM)
1854                         goto queue_full;
1855                 goto check_stop;
1856         default:
1857                 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1858                         cmd->t_task_cdb[0], cmd->scsi_sense_reason);
1859                 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1860                 break;
1861         }
1862         /*
1863          * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
1864          * make the call to transport_send_check_condition_and_sense()
1865          * directly.  Otherwise expect the fabric to make the call to
1866          * transport_send_check_condition_and_sense() after handling
1867          * possible unsolicited write data payloads.
1868          */
1869         ret = transport_send_check_condition_and_sense(cmd,
1870                         cmd->scsi_sense_reason, 0);
1871         if (ret == -EAGAIN || ret == -ENOMEM)
1872                 goto queue_full;
1873
1874 check_stop:
1875         transport_lun_remove_cmd(cmd);
1876         transport_cmd_check_stop_to_fabric(cmd);
1877         return;
1879
1880 queue_full:
1881         cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1882         transport_handle_queue_full(cmd, cmd->se_dev);
1883 }
1884
1885 static inline u32 transport_lba_21(unsigned char *cdb)
1886 {
1887         return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
1888 }
1889
1890 static inline u32 transport_lba_32(unsigned char *cdb)
1891 {
1892         return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1893 }
1894
1895 static inline unsigned long long transport_lba_64(unsigned char *cdb)
1896 {
1897         unsigned int __v1, __v2;
1898
1899         __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1900         __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1901
1902         return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1903 }
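/*
 * Worked example (illustrative): a READ_16 CDB carrying LBA
 * 0x0000000100000000 has cdb[2..5] = 00 00 00 01 and cdb[6..9] =
 * 00 00 00 00, so __v1 = 1, __v2 = 0 and the helper above returns
 * (1ULL << 32) = 4294967296.
 */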
1904
1905 /*
1906  * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
1907  */
1908 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
1909 {
1910         unsigned int __v1, __v2;
1911
1912         __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
1913         __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
1914
1915         return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1916 }
1917
1918 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1919 {
1920         unsigned long flags;
1921
1922         spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1923         se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1924         spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1925 }
1926
1927 static inline int transport_tcq_window_closed(struct se_device *dev)
1928 {
1929         if (dev->dev_tcq_window_closed++ <
1930                         PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
1931                 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
1932         } else
1933                 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
1934
1935         wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
1936         return 0;
1937 }
1938
1939 /*
1940  * Called from Fabric Module context from transport_execute_tasks()
1941  *
1942  * The return of this function determines if the tasks from struct se_cmd
1943  * get added to the execution queue in transport_execute_tasks(),
1944  * or are added to the delayed or ordered lists here.
1945  */
1946 static inline int transport_execute_task_attr(struct se_cmd *cmd)
1947 {
1948         if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1949                 return 1;
1950         /*
1951          * Check for the existence of HEAD_OF_QUEUE, and if true return 1
1952  * to add the tasks from the passed struct se_cmd to the front of the list.
1953          */
1954          if (cmd->sam_task_attr == MSG_HEAD_TAG) {
1955                 atomic_inc(&cmd->se_dev->dev_hoq_count);
1956                 smp_mb__after_atomic_inc();
1957                 pr_debug("Added HEAD_OF_QUEUE for CDB:"
1958                         " 0x%02x, se_ordered_id: %u\n",
1959                         cmd->t_task_cdb[0],
1960                         cmd->se_ordered_id);
1961                 return 1;
1962         } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
1963                 atomic_inc(&cmd->se_dev->dev_ordered_sync);
1964                 smp_mb__after_atomic_inc();
1965
1966                 pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
1967                                 " list, se_ordered_id: %u\n",
1968                                 cmd->t_task_cdb[0],
1969                                 cmd->se_ordered_id);
1970                 /*
1971                  * Add ORDERED command to tail of execution queue if
1972                  * no other older commands exist that need to be
1973                  * completed first.
1974                  */
1975                 if (!atomic_read(&cmd->se_dev->simple_cmds))
1976                         return 1;
1977         } else {
1978                 /*
1979                  * For SIMPLE and UNTAGGED Task Attribute commands
1980                  */
1981                 atomic_inc(&cmd->se_dev->simple_cmds);
1982                 smp_mb__after_atomic_inc();
1983         }
1984         /*
1985          * Otherwise, if one or more outstanding ORDERED commands exist, the
1986          * task(s) built for the passed struct se_cmd must remain dormant
1987          * until those ORDERED commands have completed on this struct se_device.
1988          */
1989         if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
1990                 /*
1991                  * Add cmd w/ tasks to the delayed cmd queue, which is
1992                  * drained as the outstanding ORDERED command(s) complete.
1993                  */
1994                 spin_lock(&cmd->se_dev->delayed_cmd_lock);
1995                 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
1996                 list_add_tail(&cmd->se_delayed_node,
1997                                 &cmd->se_dev->delayed_cmd_list);
1998                 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
1999
2000                 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
2001                         " delayed CMD list, se_ordered_id: %u\n",
2002                         cmd->t_task_cdb[0], cmd->sam_task_attr,
2003                         cmd->se_ordered_id);
2004                 /*
2005                  * Return zero to let transport_execute_tasks() know
2006                  * not to add the delayed tasks to the execution list.
2007                  */
2008                 return 0;
2009         }
2010         /*
2011          * Otherwise, no ORDERED task attributes exist..
2012          */
2013         return 1;
2014 }
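/*
 * Summary of the decision above (illustrative): HEAD_OF_QUEUE always
 * returns 1; ORDERED returns 1 only when no simple commands are in
 * flight; every other case returns 1 unless outstanding ORDERED
 * commands push the cmd onto the delayed list, which returns 0.
 */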
2015
2016 /*
2017  * Called from fabric module context in transport_generic_new_cmd() and
2018  * transport_generic_process_write()
2019  */
2020 static int transport_execute_tasks(struct se_cmd *cmd)
2021 {
2022         int add_tasks;
2023
2024         if (se_dev_check_online(cmd->se_dev) != 0) {
2025                 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2026                 transport_generic_request_failure(cmd);
2027                 return 0;
2028         }
2029
2030         /*
2031          * Call transport_cmd_check_stop() to see if a fabric exception
2032          * has occurred that prevents execution.
2033          */
2034         if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
2035                 /*
2036                  * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2037                  * attribute for the tasks of the received struct se_cmd CDB
2038                  */
2039                 add_tasks = transport_execute_task_attr(cmd);
2040                 if (!add_tasks)
2041                         goto execute_tasks;
2042                 /*
2043                  * This calls transport_add_tasks_from_cmd() to handle
2044                  * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
2045                  * (if enabled) in __transport_add_task_to_execute_queue() and
2046                  * transport_add_task_check_sam_attr().
2047                  */
2048                 transport_add_tasks_from_cmd(cmd);
2049         }
2050         /*
2051          * Kick the execution queue for the cmd associated struct se_device
2052          * storage object.
2053          */
2054 execute_tasks:
2055         __transport_execute_tasks(cmd->se_dev);
2056         return 0;
2057 }
2058
2059 /*
2060  * Called to check the struct se_device tcq depth window; once open, pull
2061  * struct se_task from struct se_device->execute_task_list and dispatch it.
2062  *
2063  * Called from transport_processing_thread()
2064  */
2065 static int __transport_execute_tasks(struct se_device *dev)
2066 {
2067         int error;
2068         struct se_cmd *cmd = NULL;
2069         struct se_task *task = NULL;
2070         unsigned long flags;
2071
2072         /*
2073          * Check if there is enough room in the device and HBA queue to send
2074          * struct se_tasks to the selected transport.
2075          */
2076 check_depth:
2077         if (!atomic_read(&dev->depth_left))
2078                 return transport_tcq_window_closed(dev);
2079
2080         dev->dev_tcq_window_closed = 0;
2081
2082         spin_lock_irq(&dev->execute_task_lock);
2083         if (list_empty(&dev->execute_task_list)) {
2084                 spin_unlock_irq(&dev->execute_task_lock);
2085                 return 0;
2086         }
2087         task = list_first_entry(&dev->execute_task_list,
2088                                 struct se_task, t_execute_list);
2089         __transport_remove_task_from_execute_queue(task, dev);
2090         spin_unlock_irq(&dev->execute_task_lock);
2091
2092         atomic_dec(&dev->depth_left);
2093
2094         cmd = task->task_se_cmd;
2095
2096         spin_lock_irqsave(&cmd->t_state_lock, flags);
2097         task->task_flags |= (TF_ACTIVE | TF_SENT);
2098         atomic_inc(&cmd->t_task_cdbs_sent);
2099
2100         if (atomic_read(&cmd->t_task_cdbs_sent) ==
2101             cmd->t_task_list_num)
2102                 atomic_set(&cmd->t_transport_sent, 1);
2103
2104         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2105
2106         if (cmd->execute_task)
2107                 error = cmd->execute_task(task);
2108         else
2109                 error = dev->transport->do_task(task);
2110         if (error != 0) {
2111                 spin_lock_irqsave(&cmd->t_state_lock, flags);
2112                 task->task_flags &= ~TF_ACTIVE;
2113                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2114                 atomic_set(&cmd->t_transport_sent, 0);
2115                 transport_stop_tasks_for_cmd(cmd);
2116                 atomic_inc(&dev->depth_left);
2117                 transport_generic_request_failure(cmd);
2118         }
2119
2120         goto check_depth;
2123 }
2124
2125 static inline u32 transport_get_sectors_6(
2126         unsigned char *cdb,
2127         struct se_cmd *cmd,
2128         int *ret)
2129 {
2130         struct se_device *dev = cmd->se_dev;
2131
2132         /*
2133          * Assume TYPE_DISK for non struct se_device objects.
2134          * Use 8-bit sector value.
2135          */
2136         if (!dev)
2137                 goto type_disk;
2138
2139         /*
2140          * Use 24-bit allocation length for TYPE_TAPE.
2141          */
2142         if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2143                 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2144
2145         /*
2146          * Everything else assume TYPE_DISK Sector CDB location.
2147          * Use 8-bit sector value.
2148          */
2149 type_disk:
2150         return (u32)cdb[4];
2151 }
2152
2153 static inline u32 transport_get_sectors_10(
2154         unsigned char *cdb,
2155         struct se_cmd *cmd,
2156         int *ret)
2157 {
2158         struct se_device *dev = cmd->se_dev;
2159
2160         /*
2161          * Assume TYPE_DISK for non struct se_device objects.
2162          * Use 16-bit sector value.
2163          */
2164         if (!dev)
2165                 goto type_disk;
2166
2167         /*
2168          * XXX_10 is not defined in SSC, throw an exception
2169          */
2170         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2171                 *ret = -EINVAL;
2172                 return 0;
2173         }
2174
2175         /*
2176          * Everything else assume TYPE_DISK Sector CDB location.
2177          * Use 16-bit sector value.
2178          */
2179 type_disk:
2180         return (u32)(cdb[7] << 8) + cdb[8];
2181 }
2182
2183 static inline u32 transport_get_sectors_12(
2184         unsigned char *cdb,
2185         struct se_cmd *cmd,
2186         int *ret)
2187 {
2188         struct se_device *dev = cmd->se_dev;
2189
2190         /*
2191          * Assume TYPE_DISK for non struct se_device objects.
2192          * Use 32-bit sector value.
2193          */
2194         if (!dev)
2195                 goto type_disk;
2196
2197         /*
2198          * XXX_12 is not defined in SSC, throw an exception
2199          */
2200         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2201                 *ret = -EINVAL;
2202                 return 0;
2203         }
2204
2205         /*
2206          * Everything else assume TYPE_DISK Sector CDB location.
2207          * Use 32-bit sector value.
2208          */
2209 type_disk:
2210         return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2211 }
2212
2213 static inline u32 transport_get_sectors_16(
2214         unsigned char *cdb,
2215         struct se_cmd *cmd,
2216         int *ret)
2217 {
2218         struct se_device *dev = cmd->se_dev;
2219
2220         /*
2221          * Assume TYPE_DISK for non struct se_device objects.
2222          * Use 32-bit sector value.
2223          */
2224         if (!dev)
2225                 goto type_disk;
2226
2227         /*
2228          * Use 24-bit allocation length for TYPE_TAPE.
2229          */
2230         if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2231                 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2232
2233 type_disk:
2234         return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2235                     (cdb[12] << 8) + cdb[13];
2236 }
2237
2238 /*
2239  * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2240  */
2241 static inline u32 transport_get_sectors_32(
2242         unsigned char *cdb,
2243         struct se_cmd *cmd,
2244         int *ret)
2245 {
2246         /*
2247          * Assume TYPE_DISK for non struct se_device objects.
2248          * Use 32-bit sector value.
2249          */
2250         return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2251                     (cdb[30] << 8) + cdb[31];
2252
2253 }
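/*
 * For reference (derived from the helpers above): the TYPE_DISK sector
 * count is carried big-endian at cdb[4] (6-byte CDBs), cdb[7..8]
 * (10-byte), cdb[6..9] (12-byte), cdb[10..13] (16-byte) and
 * cdb[28..31] (32-byte VARIABLE_LENGTH_CMD variants).
 */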
2254
2255 static inline u32 transport_get_size(
2256         u32 sectors,
2257         unsigned char *cdb,
2258         struct se_cmd *cmd)
2259 {
2260         struct se_device *dev = cmd->se_dev;
2261
2262         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2263                 if (cdb[1] & 1) { /* sectors */
2264                         return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2265                 } else /* bytes */
2266                         return sectors;
2267         }
2268 #if 0
2269         pr_debug("Returning block_size: %u, sectors: %u == %u for"
2270                         " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
2271                         dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2272                         dev->transport->name);
2273 #endif
2274         return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2275 }
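/*
 * Worked example (illustrative): with a TYPE_DISK backend using a
 * 512 byte se_dev_attrib.block_size, a READ_10 for 8 sectors maps to
 * a transfer size of 8 * 512 = 4096 bytes via the multiplication above.
 */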
2276
2277 static void transport_xor_callback(struct se_cmd *cmd)
2278 {
2279         unsigned char *buf, *addr;
2280         struct scatterlist *sg;
2281         unsigned int offset;
2282         int i;
2283         int count;
2284         /*
2285          * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2286          *
2287          * 1) read the specified logical block(s);
2288          * 2) transfer logical blocks from the data-out buffer;
2289          * 3) XOR the logical blocks transferred from the data-out buffer with
2290          *    the logical blocks read, storing the resulting XOR data in a buffer;
2291          * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2292          *    blocks transferred from the data-out buffer; and
2293          * 5) transfer the resulting XOR data to the data-in buffer.
2294          */
2295         buf = kmalloc(cmd->data_length, GFP_KERNEL);
2296         if (!buf) {
2297                 pr_err("Unable to allocate xor_callback buf\n");
2298                 return;
2299         }
2300         /*
2301          * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
2302          * into the locally allocated *buf
2303          */
2304         sg_copy_to_buffer(cmd->t_data_sg,
2305                           cmd->t_data_nents,
2306                           buf,
2307                           cmd->data_length);
2308
2309         /*
2310          * Now perform the XOR against the BIDI read memory located at
2311          * cmd->t_mem_bidi_list
2312          */
2313
2314         offset = 0;
2315         for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2316                 addr = kmap_atomic(sg_page(sg), KM_USER0);
2317                 if (!addr)
2318                         goto out;
2319
2320                 for (i = 0; i < sg->length; i++)
2321                         *(addr + sg->offset + i) ^= *(buf + offset + i);
2322
2323                 offset += sg->length;
2324                 kunmap_atomic(addr, KM_USER0);
2325         }
2326
2327 out:
2328         kfree(buf);
2329 }
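/*
 * Worked example (illustrative): if a data-out byte is 0xff and the
 * corresponding block read back holds 0x0f, the loop above stores
 * 0xff ^ 0x0f = 0xf0 into the BIDI read memory returned to the
 * initiator in step 5) of the sbc3r22 sequence quoted above.
 */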
2330
2331 /*
2332  * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2333  */
2334 static int transport_get_sense_data(struct se_cmd *cmd)
2335 {
2336         unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2337         struct se_device *dev = cmd->se_dev;
2338         struct se_task *task = NULL, *task_tmp;
2339         unsigned long flags;
2340         u32 offset = 0;
2341
2342         WARN_ON(!cmd->se_lun);
2343
2344         if (!dev)
2345                 return 0;
2346
2347         spin_lock_irqsave(&cmd->t_state_lock, flags);
2348         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2349                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2350                 return 0;
2351         }
2352
2353         list_for_each_entry_safe(task, task_tmp,
2354                                 &cmd->t_task_list, t_list) {
2355                 if (!task->task_sense)
2356                         continue;
2357
2358                 if (!dev->transport->get_sense_buffer) {
2359                         pr_err("dev->transport->get_sense_buffer"
2360                                         " is NULL\n");
2361                         continue;
2362                 }
2363
2364                 sense_buffer = dev->transport->get_sense_buffer(task);
2365                 if (!sense_buffer) {
2366                         pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
2367                                 " sense buffer for task with sense\n",
2368                                 cmd->se_tfo->get_task_tag(cmd), task);
2369                         continue;
2370                 }
2371                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2372
2373                 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
2374                                 TRANSPORT_SENSE_BUFFER);
2375
2376                 memcpy(&buffer[offset], sense_buffer,
2377                                 TRANSPORT_SENSE_BUFFER);
2378                 cmd->scsi_status = task->task_scsi_status;
2379                 /* Automatically padded */
2380                 cmd->scsi_sense_length =
2381                                 (TRANSPORT_SENSE_BUFFER + offset);
2382
2383                 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
2384                                 " and sense\n",
2385                         dev->se_hba->hba_id, dev->transport->name,
2386                                 cmd->scsi_status);
2387                 return 0;
2388         }
2389         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2390
2391         return -1;
2392 }
2393
2394 static inline long long transport_dev_end_lba(struct se_device *dev)
2395 {
2396         return dev->transport->get_blocks(dev) + 1;
2397 }
2398
2399 static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2400 {
2401         struct se_device *dev = cmd->se_dev;
2402         u32 sectors;
2403
2404         if (dev->transport->get_device_type(dev) != TYPE_DISK)
2405                 return 0;
2406
2407         sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2408
2409         if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2410                 pr_err("LBA: %llu Sectors: %u exceeds"
2411                         " transport_dev_end_lba(): %llu\n",
2412                         cmd->t_task_lba, sectors,
2413                         transport_dev_end_lba(dev));
2414                 return -EINVAL;
2415         }
2416
2417         return 0;
2418 }
2419
2420 static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2421 {
2422         /*
2423          * Determine if the received WRITE_SAME is used for direct
2424          * passthrough into Linux/SCSI with struct request via TCM/pSCSI
2425          * or we are signaling the use of internal WRITE_SAME + UNMAP=1
2426          * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
2427          */
2428         int passthrough = (dev->transport->transport_type ==
2429                                 TRANSPORT_PLUGIN_PHBA_PDEV);
2430
2431         if (!passthrough) {
2432                 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2433                         pr_err("WRITE_SAME PBDATA and LBDATA"
2434                                 " bits not supported for Block Discard"
2435                                 " Emulation\n");
2436                         return -ENOSYS;
2437                 }
2438                 /*
2439                  * Currently for the emulated case we only accept
2440                  * thin-provisioned WRITE_SAME (tpws) with the UNMAP=1 bit set.
2441                  */
2442                 if (!(flags[0] & 0x08)) {
2443                         pr_err("WRITE_SAME w/o UNMAP bit not"
2444                                 " supported for Block Discard Emulation\n");
2445                         return -ENOSYS;
2446                 }
2447         }
2448
2449         return 0;
2450 }
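/*
 * For reference (sbc3 WRITE_SAME byte 1 layout as tested above):
 * bit 3 (0x08) is UNMAP, bit 2 (0x04) is PBDATA and bit 1 (0x02) is
 * LBDATA.  The emulated path therefore accepts e.g. flags[0] == 0x08
 * and rejects 0x0c (PBDATA set) or 0x00 (UNMAP clear).
 */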
2451
2452 /*      transport_generic_cmd_sequencer():
2453  *
2454  *      Generic Command Sequencer that should work for most DAS transport
2455  *      drivers.
2456  *
2457  *      Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
2458  *      RX Thread.
2459  *
2460  *      FIXME: Need to support other SCSI OPCODES here as well.
2461  */
2462 static int transport_generic_cmd_sequencer(
2463         struct se_cmd *cmd,
2464         unsigned char *cdb)
2465 {
2466         struct se_device *dev = cmd->se_dev;
2467         struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2468         int ret = 0, sector_ret = 0, passthrough;
2469         u32 sectors = 0, size = 0, pr_reg_type = 0;
2470         u16 service_action;
2471         u8 alua_ascq = 0;
2472         /*
2473          * Check for an existing UNIT ATTENTION condition
2474          */
2475         if (core_scsi3_ua_check(cmd, cdb) < 0) {
2476                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2477                 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
2478                 return -EINVAL;
2479         }
2480         /*
2481          * Check status of Asymmetric Logical Unit Assignment port
2482          */
2483         ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
2484         if (ret != 0) {
2485                 /*
2486                  * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
2487                  * the ALUA additional sense code qualifier (ASCQ) is determined
2488                  * by the ALUA primary or secondary access state.
2489                  */
2490                 if (ret > 0) {
2491 #if 0
2492                         pr_debug("[%s]: ALUA TG Port not available,"
2493                                 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
2494                                 cmd->se_tfo->get_fabric_name(), alua_ascq);
2495 #endif
2496                         transport_set_sense_codes(cmd, 0x04, alua_ascq);
2497                         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2498                         cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
2499                         return -EINVAL;
2500                 }
2501                 goto out_invalid_cdb_field;
2502         }
2503         /*
2504          * Check status for SPC-3 Persistent Reservations
2505          */
2506         if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2507                 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2508                                         cmd, cdb, pr_reg_type) != 0) {
2509                         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2510                         cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2511                         cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2512                         return -EBUSY;
2513                 }
2514                 /*
2515                  * This means the CDB is allowed for the SCSI Initiator port
2516                  * when said port is *NOT* holding the legacy SPC-2 or
2517                  * SPC-3 Persistent Reservation.
2518                  */
2519         }
2520
2521         /*
2522          * If we operate in passthrough mode we skip most CDB emulation and
2523          * instead hand the commands down to the physical SCSI device.
2524          */
2525         passthrough =
2526                 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2527
2528         switch (cdb[0]) {
2529         case READ_6:
2530                 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2531                 if (sector_ret)
2532                         goto out_unsupported_cdb;
2533                 size = transport_get_size(sectors, cdb, cmd);
2534                 cmd->t_task_lba = transport_lba_21(cdb);
2535                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2536                 break;
2537         case READ_10:
2538                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2539                 if (sector_ret)
2540                         goto out_unsupported_cdb;
2541                 size = transport_get_size(sectors, cdb, cmd);
2542                 cmd->t_task_lba = transport_lba_32(cdb);
2543                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2544                 break;
2545         case READ_12:
2546                 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2547                 if (sector_ret)
2548                         goto out_unsupported_cdb;
2549                 size = transport_get_size(sectors, cdb, cmd);
2550                 cmd->t_task_lba = transport_lba_32(cdb);
2551                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2552                 break;
2553         case READ_16:
2554                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2555                 if (sector_ret)
2556                         goto out_unsupported_cdb;
2557                 size = transport_get_size(sectors, cdb, cmd);
2558                 cmd->t_task_lba = transport_lba_64(cdb);
2559                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2560                 break;
2561         case WRITE_6:
2562                 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2563                 if (sector_ret)
2564                         goto out_unsupported_cdb;
2565                 size = transport_get_size(sectors, cdb, cmd);
2566                 cmd->t_task_lba = transport_lba_21(cdb);
2567                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2568                 break;
2569         case WRITE_10:
2570                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2571                 if (sector_ret)
2572                         goto out_unsupported_cdb;
2573                 size = transport_get_size(sectors, cdb, cmd);
2574                 cmd->t_task_lba = transport_lba_32(cdb);
2575                 cmd->t_tasks_fua = (cdb[1] & 0x8);
2576                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2577                 break;
2578         case WRITE_12:
2579                 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2580                 if (sector_ret)
2581                         goto out_unsupported_cdb;
2582                 size = transport_get_size(sectors, cdb, cmd);
2583                 cmd->t_task_lba = transport_lba_32(cdb);
2584                 cmd->t_tasks_fua = (cdb[1] & 0x8);
2585                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2586                 break;
2587         case WRITE_16:
2588                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2589                 if (sector_ret)
2590                         goto out_unsupported_cdb;
2591                 size = transport_get_size(sectors, cdb, cmd);
2592                 cmd->t_task_lba = transport_lba_64(cdb);
2593                 cmd->t_tasks_fua = (cdb[1] & 0x8);
2594                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2595                 break;
2596         case XDWRITEREAD_10:
2597                 if ((cmd->data_direction != DMA_TO_DEVICE) ||
2598                     !(cmd->t_tasks_bidi))
2599                         goto out_invalid_cdb_field;
2600                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2601                 if (sector_ret)
2602                         goto out_unsupported_cdb;
2603                 size = transport_get_size(sectors, cdb, cmd);
2604                 cmd->t_task_lba = transport_lba_32(cdb);
2605                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2606
2607                 /*
2608                  * Do not allow BIDI commands for passthrough mode.
2609                  */
2610                 if (passthrough)
2611                         goto out_unsupported_cdb;
2612
2613                 /*
2614                  * Setup BIDI XOR callback to be run after I/O completion.
2615                  */
2616                 cmd->transport_complete_callback = &transport_xor_callback;
2617                 cmd->t_tasks_fua = (cdb[1] & 0x8);
2618                 break;
2619         case VARIABLE_LENGTH_CMD:
2620                 service_action = get_unaligned_be16(&cdb[8]);
2621                 switch (service_action) {
2622                 case XDWRITEREAD_32:
2623                         sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2624                         if (sector_ret)
2625                                 goto out_unsupported_cdb;
2626                         size = transport_get_size(sectors, cdb, cmd);
2627                         /*
2628                          * Use WRITE_32 and READ_32 opcodes for the emulated
2629                          * XDWRITE_READ_32 logic.
2630                          */
2631                         cmd->t_task_lba = transport_lba_64_ext(cdb);
2632                         cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2633
2634                         /*
2635                  * Do not allow BIDI commands for passthrough mode.
2636                          */
2637                         if (passthrough)
2638                                 goto out_unsupported_cdb;
2639
2640                         /*
2641                          * Setup BIDI XOR callback to be run after I/O
2642                          * completion.
2643                          */
2644                         cmd->transport_complete_callback = &transport_xor_callback;
2645                         cmd->t_tasks_fua = (cdb[10] & 0x8);
2646                         break;
2647                 case WRITE_SAME_32:
2648                         sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2649                         if (sector_ret)
2650                                 goto out_unsupported_cdb;
2651
2652                         if (sectors)
2653                                 size = transport_get_size(1, cdb, cmd);
2654                         else {
2655                                 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2656                                        " supported\n");
2657                                 goto out_invalid_cdb_field;
2658                         }
2659
2660                         cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
2661                         cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2662
2663                         if (target_check_write_same_discard(&cdb[10], dev) < 0)
2664                                 goto out_invalid_cdb_field;
2665                         if (!passthrough)
2666                                 cmd->execute_task = target_emulate_write_same;
2667                         break;
2668                 default:
2669                         pr_err("VARIABLE_LENGTH_CMD service action"
2670                                 " 0x%04x not supported\n", service_action);
2671                         goto out_unsupported_cdb;
2672                 }
2673                 break;
2674         case MAINTENANCE_IN:
2675                 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2676                         /* MAINTENANCE_IN from SCC-2 */
2677                         /*
2678                          * Check for emulated MI_REPORT_TARGET_PGS.
2679                          */
2680                         if (cdb[1] == MI_REPORT_TARGET_PGS &&
2681                             su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2682                                 cmd->execute_task =
2683                                         target_emulate_report_target_port_groups;
2684                         }
2685                         size = (cdb[6] << 24) | (cdb[7] << 16) |
2686                                (cdb[8] << 8) | cdb[9];
2687                 } else {
2688                         /* GPCMD_SEND_KEY from multi media commands */
2689                         size = (cdb[8] << 8) + cdb[9];
2690                 }
2691                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2692                 break;
2693         case MODE_SELECT:
2694                 size = cdb[4];
2695                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2696                 break;
2697         case MODE_SELECT_10:
2698                 size = (cdb[7] << 8) + cdb[8];
2699                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2700                 break;
2701         case MODE_SENSE:
2702                 size = cdb[4];
2703                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2704                 if (!passthrough)
2705                         cmd->execute_task = target_emulate_modesense;
2706                 break;
2707         case MODE_SENSE_10:
2708                 size = (cdb[7] << 8) + cdb[8];
2709                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2710                 if (!passthrough)
2711                         cmd->execute_task = target_emulate_modesense;
2712                 break;
2713         case GPCMD_READ_BUFFER_CAPACITY:
2714         case GPCMD_SEND_OPC:
2715         case LOG_SELECT:
2716         case LOG_SENSE:
2717                 size = (cdb[7] << 8) + cdb[8];
2718                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2719                 break;
2720         case READ_BLOCK_LIMITS:
2721                 size = READ_BLOCK_LEN;
2722                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2723                 break;
2724         case GPCMD_GET_CONFIGURATION:
2725         case GPCMD_READ_FORMAT_CAPACITIES:
2726         case GPCMD_READ_DISC_INFO:
2727         case GPCMD_READ_TRACK_RZONE_INFO:
2728                 size = (cdb[7] << 8) + cdb[8];
2729                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2730                 break;
2731         case PERSISTENT_RESERVE_IN:
2732                 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2733                         cmd->execute_task = target_scsi3_emulate_pr_in;
2734                 size = (cdb[7] << 8) + cdb[8];
2735                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2736                 break;
2737         case PERSISTENT_RESERVE_OUT:
2738                 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2739                         cmd->execute_task = target_scsi3_emulate_pr_out;
2740                 size = (cdb[7] << 8) + cdb[8];
2741                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2742                 break;
2743         case GPCMD_MECHANISM_STATUS:
2744         case GPCMD_READ_DVD_STRUCTURE:
2745                 size = (cdb[8] << 8) + cdb[9];
2746                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2747                 break;
2748         case READ_POSITION:
2749                 size = READ_POSITION_LEN;
2750                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2751                 break;
2752         case MAINTENANCE_OUT:
2753                 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2754                         /* MAINTENANCE_OUT from SCC-2
2755                          *
2756                          * Check for emulated MO_SET_TARGET_PGS.
2757                          */
2758                         if (cdb[1] == MO_SET_TARGET_PGS &&
2759                             su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2760                                 cmd->execute_task =
2761                                         target_emulate_set_target_port_groups;
2762                         }
2763
2764                         size = (cdb[6] << 24) | (cdb[7] << 16) |
2765                                (cdb[8] << 8) | cdb[9];
2766                 } else  {
2767                         /* GPCMD_REPORT_KEY from multi media commands */
2768                         size = (cdb[8] << 8) + cdb[9];
2769                 }
2770                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2771                 break;
2772         case INQUIRY:
2773                 size = (cdb[3] << 8) + cdb[4];
2774                 /*
2775                  * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
2776                  * See spc4r17 section 5.3
2777                  */
2778                 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2779                         cmd->sam_task_attr = MSG_HEAD_TAG;
2780                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2781                 if (!passthrough)
2782                         cmd->execute_task = target_emulate_inquiry;
2783                 break;
2784         case READ_BUFFER:
2785                 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2786                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2787                 break;
2788         case READ_CAPACITY:
2789                 size = READ_CAP_LEN;
2790                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2791                 if (!passthrough)
2792                         cmd->execute_task = target_emulate_readcapacity;
2793                 break;
2794         case READ_MEDIA_SERIAL_NUMBER:
2795         case SECURITY_PROTOCOL_IN:
2796         case SECURITY_PROTOCOL_OUT:
2797                 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2798                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2799                 break;
2800         case SERVICE_ACTION_IN:
2801                 switch (cmd->t_task_cdb[1] & 0x1f) {
2802                 case SAI_READ_CAPACITY_16:
2803                         if (!passthrough)
2804                                 cmd->execute_task =
2805                                         target_emulate_readcapacity_16;
2806                         break;
2807                 default:
2808                         if (passthrough)
2809                                 break;
2810
2811                         pr_err("Unsupported SA: 0x%02x\n",
2812                                 cmd->t_task_cdb[1] & 0x1f);
2813                         goto out_unsupported_cdb;
2814                 }
2815                 /*FALLTHROUGH*/
2816         case ACCESS_CONTROL_IN:
2817         case ACCESS_CONTROL_OUT:
2818         case EXTENDED_COPY:
2819         case READ_ATTRIBUTE:
2820         case RECEIVE_COPY_RESULTS:
2821         case WRITE_ATTRIBUTE:
2822                 size = (cdb[10] << 24) | (cdb[11] << 16) |
2823                        (cdb[12] << 8) | cdb[13];
2824                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2825                 break;
2826         case RECEIVE_DIAGNOSTIC:
2827         case SEND_DIAGNOSTIC:
2828                 size = (cdb[3] << 8) | cdb[4];
2829                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2830                 break;
2831 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
2832 #if 0
2833         case GPCMD_READ_CD:
2834                 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2835                 size = (2336 * sectors);
2836                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2837                 break;
2838 #endif
2839         case READ_TOC:
2840                 size = cdb[8];
2841                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2842                 break;
2843         case REQUEST_SENSE:
2844                 size = cdb[4];
2845                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2846                 if (!passthrough)
2847                         cmd->execute_task = target_emulate_request_sense;
2848                 break;
2849         case READ_ELEMENT_STATUS:
2850                 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
2851                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2852                 break;
2853         case WRITE_BUFFER:
2854                 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2855                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2856                 break;
2857         case RESERVE:
2858         case RESERVE_10:
2859                 /*
2860                  * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
2861                  * Assume the passthrough or $FABRIC_MOD will tell us about it.
2862                  */
2863                 if (cdb[0] == RESERVE_10)
2864                         size = (cdb[7] << 8) | cdb[8];
2865                 else
2866                         size = cmd->data_length;
2867
2868                 /*
2869                  * Set up the legacy emulated handler for SPC-2 and
2870                  * >= SPC-3 compatible reservation handling (CRH=1).
2871                  * Otherwise, we assume the underlying SCSI logic is
2872                  * running in SPC_PASSTHROUGH and wants reservation
2873                  * emulation disabled.
2874                  */
2875                 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2876                         cmd->execute_task = target_scsi2_reservation_reserve;
2877                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2878                 break;
2879         case RELEASE:
2880         case RELEASE_10:
2881                 /*
2882                  * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
2883                  * Assume the passthrough or $FABRIC_MOD will tell us about it.
2884                  */
2885                 if (cdb[0] == RELEASE_10)
2886                         size = (cdb[7] << 8) | cdb[8];
2887                 else
2888                         size = cmd->data_length;
2889
2890                 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2891                         cmd->execute_task = target_scsi2_reservation_release;
2892                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2893                 break;
2894         case SYNCHRONIZE_CACHE:
2895         case 0x91: /* SYNCHRONIZE_CACHE_16: */
2896                 /*
2897                  * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
2898                  */
2899                 if (cdb[0] == SYNCHRONIZE_CACHE) {
2900                         sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2901                         cmd->t_task_lba = transport_lba_32(cdb);
2902                 } else {
2903                         sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2904                         cmd->t_task_lba = transport_lba_64(cdb);
2905                 }
2906                 if (sector_ret)
2907                         goto out_unsupported_cdb;
2908
2909                 size = transport_get_size(sectors, cdb, cmd);
2910                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2911
2912                 if (passthrough)
2913                         break;
2914
2915                 /*
2916                  * Check to ensure that LBA + Range does not extend past the end
2917                  * of the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
2918                  */
2919                 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
2920                         if (transport_cmd_get_valid_sectors(cmd) < 0)
2921                                 goto out_invalid_cdb_field;
2922                 }
2923                 cmd->execute_task = target_emulate_synchronize_cache;
2924                 break;
2925         case UNMAP:
2926                 size = get_unaligned_be16(&cdb[7]);
2927                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2928                 if (!passthrough)
2929                         cmd->execute_task = target_emulate_unmap;
2930                 break;
2931         case WRITE_SAME_16:
2932                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2933                 if (sector_ret)
2934                         goto out_unsupported_cdb;
2935
2936                 if (sectors)
2937                         size = transport_get_size(1, cdb, cmd);
2938                 else {
2939                         pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2940                         goto out_invalid_cdb_field;
2941                 }
2942
2943                 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
2944                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2945
2946                 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2947                         goto out_invalid_cdb_field;
2948                 if (!passthrough)
2949                         cmd->execute_task = target_emulate_write_same;
2950                 break;
2951         case WRITE_SAME:
2952                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2953                 if (sector_ret)
2954                         goto out_unsupported_cdb;
2955
2956                 if (sectors)
2957                         size = transport_get_size(1, cdb, cmd);
2958                 else {
2959                         pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2960                         goto out_invalid_cdb_field;
2961                 }
2962
2963                 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
2964                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2965                 /*
2966                  * Follow sbcr26 with WRITE_SAME (10) and check for the existence
2967                  * of byte 1 bit 3 UNMAP instead of original reserved field
2968                  */
2969                 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2970                         goto out_invalid_cdb_field;
2971                 if (!passthrough)
2972                         cmd->execute_task = target_emulate_write_same;
2973                 break;
2974         case ALLOW_MEDIUM_REMOVAL:
2975         case ERASE:
2976         case REZERO_UNIT:
2977         case SEEK_10:
2978         case SPACE:
2979         case START_STOP:
2980         case TEST_UNIT_READY:
2981         case VERIFY:
2982         case WRITE_FILEMARKS:
2983                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2984                 if (!passthrough)
2985                         cmd->execute_task = target_emulate_noop;
2986                 break;
2987         case GPCMD_CLOSE_TRACK:
2988         case INITIALIZE_ELEMENT_STATUS:
2989         case GPCMD_LOAD_UNLOAD:
2990         case GPCMD_SET_SPEED:
2991         case MOVE_MEDIUM:
2992                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2993                 break;
2994         case REPORT_LUNS:
2995                 cmd->execute_task = target_report_luns;
2996                 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2997                 /*
2998                  * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
2999                  * See spc4r17 section 5.3
3000                  */
3001                 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3002                         cmd->sam_task_attr = MSG_HEAD_TAG;
3003                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3004                 break;
3005         default:
3006                 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
3007                         " 0x%02x, sending CHECK_CONDITION.\n",
3008                         cmd->se_tfo->get_fabric_name(), cdb[0]);
3009                 goto out_unsupported_cdb;
3010         }
3011
3012         if (size != cmd->data_length) {
3013                 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
3014                         " %u does not match SCSI CDB Length: %u for SAM Opcode:"
3015                         " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
3016                                 cmd->data_length, size, cdb[0]);
3017
3018                 cmd->cmd_spdtl = size;
3019
3020                 if (cmd->data_direction == DMA_TO_DEVICE) {
3021                         pr_err("Rejecting underflow/overflow"
3022                                         " WRITE data\n");
3023                         goto out_invalid_cdb_field;
3024                 }
3025                 /*
3026                  * Reject READ_* or WRITE_* with overflow/underflow for
3027                  * type SCF_SCSI_DATA_SG_IO_CDB.
3028                  */
3029                 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
3030                         pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
3031                                 " CDB on non 512-byte sector setup subsystem"
3032                                 " plugin: %s\n", dev->transport->name);
3033                         /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3034                         goto out_invalid_cdb_field;
3035                 }
3036
3037                 if (size > cmd->data_length) {
3038                         cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3039                         cmd->residual_count = (size - cmd->data_length);
3040                 } else {
3041                         cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3042                         cmd->residual_count = (cmd->data_length - size);
3043                 }
3044                 cmd->data_length = size;
3045         }
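
        /*
         * Editor's illustrative sketch, not part of the original file:
         * worked numbers for the OVERFLOW/UNDERFLOW accounting above.
         */
#if 0
        u32 expected = 2048, cdb_size = 4096;   /* hypothetical lengths */
        u32 residual;

        if (cdb_size > expected)                /* SCF_OVERFLOW_BIT */
                residual = cdb_size - expected; /* -> 2048 */
        else                                    /* SCF_UNDERFLOW_BIT */
                residual = expected - cdb_size;
#endif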
3046
3047         /* reject any command that we don't have a handler for */
3048         if (!(passthrough || cmd->execute_task ||
3049              (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3050                 goto out_unsupported_cdb;
3051
3052         /* Let's limit control cdbs to a page, for simplicity's sake. */
3053         if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3054             size > PAGE_SIZE)
3055                 goto out_invalid_cdb_field;
3056
3057         transport_set_supported_SAM_opcode(cmd);
3058         return ret;
3059
3060 out_unsupported_cdb:
3061         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3062         cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3063         return -EINVAL;
3064 out_invalid_cdb_field:
3065         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3066         cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3067         return -EINVAL;
3068 }
3069
3070 /*
3071  * Called from I/O completion to determine which dormant/delayed
3072  * and ordered cmds need to have their tasks added to the execution queue.
3073  */
3074 static void transport_complete_task_attr(struct se_cmd *cmd)
3075 {
3076         struct se_device *dev = cmd->se_dev;
3077         struct se_cmd *cmd_p, *cmd_tmp;
3078         int new_active_tasks = 0;
3079
3080         if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3081                 atomic_dec(&dev->simple_cmds);
3082                 smp_mb__after_atomic_dec();
3083                 dev->dev_cur_ordered_id++;
3084                 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
3085                         " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3086                         cmd->se_ordered_id);
3087         } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3088                 atomic_dec(&dev->dev_hoq_count);
3089                 smp_mb__after_atomic_dec();
3090                 dev->dev_cur_ordered_id++;
3091                 pr_debug("Incremented dev_cur_ordered_id: %u for"
3092                         " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3093                         cmd->se_ordered_id);
3094         } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3095                 atomic_dec(&dev->dev_ordered_sync);
3096                 smp_mb__after_atomic_dec();
3097
3098                 dev->dev_cur_ordered_id++;
3099                 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3100                         " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3101         }
3102         /*
3103          * Process all commands up to the last received
3104          * ORDERED task attribute, which requires another
3105          * blocking boundary.
3106          */
3107         spin_lock(&dev->delayed_cmd_lock);
3108         list_for_each_entry_safe(cmd_p, cmd_tmp,
3109                         &dev->delayed_cmd_list, se_delayed_node) {
3110
3111                 list_del(&cmd_p->se_delayed_node);
3112                 spin_unlock(&dev->delayed_cmd_lock);
3113
3114                 pr_debug("Calling add_tasks() for"
3115                         " cmd_p: 0x%02x Task Attr: 0x%02x"
3116                         " Dormant -> Active, se_ordered_id: %u\n",
3117                         cmd_p->t_task_cdb[0],
3118                         cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3119
3120                 transport_add_tasks_from_cmd(cmd_p);
3121                 new_active_tasks++;
3122
3123                 spin_lock(&dev->delayed_cmd_lock);
3124                 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3125                         break;
3126         }
3127         spin_unlock(&dev->delayed_cmd_lock);
3128         /*
3129          * If new tasks have become active, wake up the transport thread
3130          * to do the processing of the Active tasks.
3131          */
3132         if (new_active_tasks != 0)
3133                 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
3134 }
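
/*
 * Editor's illustrative sketch, not part of the original file: a simplified
 * stand-alone model of the barrier semantics drained above.  HEAD_OF_QUEUE
 * commands bypass the barrier; other commands must wait while an ORDERED
 * command is still outstanding.
 */
#if 0
enum example_attr { EX_SIMPLE, EX_HEAD, EX_ORDERED };   /* hypothetical */

static int example_may_execute(enum example_attr attr, int ordered_outstanding)
{
        if (attr == EX_HEAD)
                return 1;               /* head-of-queue jumps the barrier */
        return !ordered_outstanding;    /* others wait for the boundary */
}
#endif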
3135
3136 static void transport_complete_qf(struct se_cmd *cmd)
3137 {
3138         int ret = 0;
3139
3140         if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3141                 transport_complete_task_attr(cmd);
3142
3143         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3144                 ret = cmd->se_tfo->queue_status(cmd);
3145                 if (ret)
3146                         goto out;
3147         }
3148
3149         switch (cmd->data_direction) {
3150         case DMA_FROM_DEVICE:
3151                 ret = cmd->se_tfo->queue_data_in(cmd);
3152                 break;
3153         case DMA_TO_DEVICE:
3154                 if (cmd->t_bidi_data_sg) {
3155                         ret = cmd->se_tfo->queue_data_in(cmd);
3156                         if (ret < 0)
3157                                 break;
3158                 }
3159                 /* Fall through for DMA_TO_DEVICE */
3160         case DMA_NONE:
3161                 ret = cmd->se_tfo->queue_status(cmd);
3162                 break;
3163         default:
3164                 break;
3165         }
3166
3167 out:
3168         if (ret < 0) {
3169                 transport_handle_queue_full(cmd, cmd->se_dev);
3170                 return;
3171         }
3172         transport_lun_remove_cmd(cmd);
3173         transport_cmd_check_stop_to_fabric(cmd);
3174 }
3175
3176 static void transport_handle_queue_full(
3177         struct se_cmd *cmd,
3178         struct se_device *dev)
3179 {
3180         spin_lock_irq(&dev->qf_cmd_lock);
3181         list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3182         atomic_inc(&dev->dev_qf_count);
3183         smp_mb__after_atomic_inc();
3184         spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3185
3186         schedule_work(&cmd->se_dev->qf_work_queue);
3187 }
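
/*
 * Editor's illustrative sketch, not part of the original file: the caller-side
 * pattern for the queue-full path.  A fabric queue callback returning
 * -EAGAIN/-ENOMEM parks the command for a later retry by qf_work_queue.
 */
#if 0
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
        transport_handle_queue_full(cmd, cmd->se_dev);  /* retried later */
#endif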
3188
3189 static void target_complete_ok_work(struct work_struct *work)
3190 {
3191         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3192         int reason = 0, ret;
3193
3194         /*
3195          * Check if we need to move delayed/dormant tasks from cmds on the
3196          * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3197          * Attribute.
3198          */
3199         if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3200                 transport_complete_task_attr(cmd);
3201         /*
3202          * Check to schedule QUEUE_FULL work for commands parked on
3203          * the device's qf_cmd_list.
3204          */
3205         if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3206                 schedule_work(&cmd->se_dev->qf_work_queue);
3207
3208         /*
3209          * Check if we need to retrieve a sense buffer from
3210          * the struct se_cmd in question.
3211          */
3212         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3213                 if (transport_get_sense_data(cmd) < 0)
3214                         reason = TCM_NON_EXISTENT_LUN;
3215
3216                 /*
3217                  * Only set when a struct se_task->task_scsi_status returned
3218                  * a non-GOOD status.
3219                  */
3220                 if (cmd->scsi_status) {
3221                         ret = transport_send_check_condition_and_sense(
3222                                         cmd, reason, 1);
3223                         if (ret == -EAGAIN || ret == -ENOMEM)
3224                                 goto queue_full;
3225
3226                         transport_lun_remove_cmd(cmd);
3227                         transport_cmd_check_stop_to_fabric(cmd);
3228                         return;
3229                 }
3230         }
3231         /*
3232          * Check for a callback, used amongst other things by
3233          * XDWRITE_READ_10 emulation.
3234          */
3235         if (cmd->transport_complete_callback)
3236                 cmd->transport_complete_callback(cmd);
3237
3238         switch (cmd->data_direction) {
3239         case DMA_FROM_DEVICE:
3240                 spin_lock(&cmd->se_lun->lun_sep_lock);
3241                 if (cmd->se_lun->lun_sep) {
3242                         cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3243                                         cmd->data_length;
3244                 }
3245                 spin_unlock(&cmd->se_lun->lun_sep_lock);
3246
3247                 ret = cmd->se_tfo->queue_data_in(cmd);
3248                 if (ret == -EAGAIN || ret == -ENOMEM)
3249                         goto queue_full;
3250                 break;
3251         case DMA_TO_DEVICE:
3252                 spin_lock(&cmd->se_lun->lun_sep_lock);
3253                 if (cmd->se_lun->lun_sep) {
3254                         cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
3255                                 cmd->data_length;
3256                 }
3257                 spin_unlock(&cmd->se_lun->lun_sep_lock);
3258                 /*
3259                  * Check if we need to send READ payload for BIDI-COMMAND
3260                  */
3261                 if (cmd->t_bidi_data_sg) {
3262                         spin_lock(&cmd->se_lun->lun_sep_lock);
3263                         if (cmd->se_lun->lun_sep) {
3264                                 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3265                                         cmd->data_length;
3266                         }
3267                         spin_unlock(&cmd->se_lun->lun_sep_lock);
3268                         ret = cmd->se_tfo->queue_data_in(cmd);
3269                         if (ret == -EAGAIN || ret == -ENOMEM)
3270                                 goto queue_full;
3271                         break;
3272                 }
3273                 /* Fall through for DMA_TO_DEVICE */
3274         case DMA_NONE:
3275                 ret = cmd->se_tfo->queue_status(cmd);
3276                 if (ret == -EAGAIN || ret == -ENOMEM)
3277                         goto queue_full;
3278                 break;
3279         default:
3280                 break;
3281         }
3282
3283         transport_lun_remove_cmd(cmd);
3284         transport_cmd_check_stop_to_fabric(cmd);
3285         return;
3286
3287 queue_full:
3288         pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3289                 " data_direction: %d\n", cmd, cmd->data_direction);
3290         cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
3291         transport_handle_queue_full(cmd, cmd->se_dev);
3292 }
3293
3294 static void transport_free_dev_tasks(struct se_cmd *cmd)
3295 {
3296         struct se_task *task, *task_tmp;
3297         unsigned long flags;
3298         LIST_HEAD(dispose_list);
3299
3300         spin_lock_irqsave(&cmd->t_state_lock, flags);
3301         list_for_each_entry_safe(task, task_tmp,
3302                                 &cmd->t_task_list, t_list) {
3303                 if (!(task->task_flags & TF_ACTIVE))
3304                         list_move_tail(&task->t_list, &dispose_list);
3305         }
3306         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3307
3308         while (!list_empty(&dispose_list)) {
3309                 task = list_first_entry(&dispose_list, struct se_task, t_list);
3310
3311                 if (task->task_sg != cmd->t_data_sg &&
3312                     task->task_sg != cmd->t_bidi_data_sg)
3313                         kfree(task->task_sg);
3314
3315                 list_del(&task->t_list);
3316
3317                 cmd->se_dev->transport->free_task(task);
3318         }
3319 }
3320
3321 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3322 {
3323         struct scatterlist *sg;
3324         int count;
3325
3326         for_each_sg(sgl, sg, nents, count)
3327                 __free_page(sg_page(sg));
3328
3329         kfree(sgl);
3330 }
3331
3332 static inline void transport_free_pages(struct se_cmd *cmd)
3333 {
3334         if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3335                 return;
3336
3337         transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3338         cmd->t_data_sg = NULL;
3339         cmd->t_data_nents = 0;
3340
3341         transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3342         cmd->t_bidi_data_sg = NULL;
3343         cmd->t_bidi_data_nents = 0;
3344 }
3345
3346 /**
3347  * transport_put_cmd - release a reference to a command
3348  * @cmd:       command to release
3349  *
3350  * This routine releases our reference to the command and frees it if possible.
3351  */
3352 static void transport_put_cmd(struct se_cmd *cmd)
3353 {
3354         unsigned long flags;
3355         int free_tasks = 0;
3356
3357         spin_lock_irqsave(&cmd->t_state_lock, flags);
3358         if (atomic_read(&cmd->t_fe_count)) {
3359                 if (!atomic_dec_and_test(&cmd->t_fe_count))
3360                         goto out_busy;
3361         }
3362
3363         if (atomic_read(&cmd->t_se_count)) {
3364                 if (!atomic_dec_and_test(&cmd->t_se_count))
3365                         goto out_busy;
3366         }
3367
3368         if (atomic_read(&cmd->transport_dev_active)) {
3369                 atomic_set(&cmd->transport_dev_active, 0);
3370                 transport_all_task_dev_remove_state(cmd);
3371                 free_tasks = 1;
3372         }
3373         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3374
3375         if (free_tasks != 0)
3376                 transport_free_dev_tasks(cmd);
3377
3378         transport_free_pages(cmd);
3379         transport_release_cmd(cmd);
3380         return;
3381 out_busy:
3382         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3383 }
3384
3385 /*
3386  * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
3387  * allocating in the core.
3388  * @cmd:  Associated se_cmd descriptor
3389  * @sgl:  SGL-style memory for TCM WRITE / READ
3390  * @sgl_count: Number of SGL elements
3391  * @sgl_bidi: SGL-style memory for TCM BIDI READ
3392  * @sgl_bidi_count: Number of BIDI READ SGL elements
3393  *
3394  * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
3395  * of parameters.
3396  */
3397 int transport_generic_map_mem_to_cmd(
3398         struct se_cmd *cmd,
3399         struct scatterlist *sgl,
3400         u32 sgl_count,
3401         struct scatterlist *sgl_bidi,
3402         u32 sgl_bidi_count)
3403 {
3404         if (!sgl || !sgl_count)
3405                 return 0;
3406
3407         if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3408             (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3409
3410                 cmd->t_data_sg = sgl;
3411                 cmd->t_data_nents = sgl_count;
3412
3413                 if (sgl_bidi && sgl_bidi_count) {
3414                         cmd->t_bidi_data_sg = sgl_bidi;
3415                         cmd->t_bidi_data_nents = sgl_bidi_count;
3416                 }
3417                 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
3418         }
3419
3420         return 0;
3421 }
3422 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
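
/*
 * Editor's illustrative sketch, not part of the original file: a fabric module
 * that already owns a scatterlist can hand it to the core before submission;
 * fabric_sgl/fabric_nents are hypothetical names.
 */
#if 0
rc = transport_generic_map_mem_to_cmd(se_cmd, fabric_sgl, fabric_nents,
                                      NULL, 0);
if (!rc)
        rc = transport_generic_new_cmd(se_cmd);
#endif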
3423
3424 void *transport_kmap_first_data_page(struct se_cmd *cmd)
3425 {
3426         struct scatterlist *sg = cmd->t_data_sg;
3427
3428         BUG_ON(!sg);
3429         /*
3430          * We need to take into account a possible offset here for fabrics like
3431          * tcm_loop, which may be using a contiguous buffer from the SCSI midlayer for
3432          * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
3433          */
3434         return kmap(sg_page(sg)) + sg->offset;
3435 }
3436 EXPORT_SYMBOL(transport_kmap_first_data_page);
3437
3438 void transport_kunmap_first_data_page(struct se_cmd *cmd)
3439 {
3440         kunmap(sg_page(cmd->t_data_sg));
3441 }
3442 EXPORT_SYMBOL(transport_kunmap_first_data_page);
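
/*
 * Editor's illustrative sketch, not part of the original file: the kmap must
 * always be paired with the kunmap once the first data page has been filled
 * in or inspected.
 */
#if 0
unsigned char *buf = transport_kmap_first_data_page(cmd);

buf[0] = 0x70;          /* hypothetical: fill in a response byte */
transport_kunmap_first_data_page(cmd);
#endif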
3443
3444 static int
3445 transport_generic_get_mem(struct se_cmd *cmd)
3446 {
3447         u32 length = cmd->data_length;
3448         unsigned int nents;
3449         struct page *page;
3450         int i = 0;
3451
3452         nents = DIV_ROUND_UP(length, PAGE_SIZE);
3453         cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3454         if (!cmd->t_data_sg)
3455                 return -ENOMEM;
3456
3457         cmd->t_data_nents = nents;
3458         sg_init_table(cmd->t_data_sg, nents);
3459
3460         while (length) {
3461                 u32 page_len = min_t(u32, length, PAGE_SIZE);
3462                 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3463                 if (!page)
3464                         goto out;
3465
3466                 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3467                 length -= page_len;
3468                 i++;
3469         }
3470         return 0;
3471
3472 out:
3473         while (i > 0) {                 /* entry i was never populated */
3474                 i--;
3475                 __free_page(sg_page(&cmd->t_data_sg[i]));
3476         }
3477         kfree(cmd->t_data_sg);
3478         cmd->t_data_sg = NULL;
3479         return -ENOMEM;
3480 }
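
/*
 * Editor's illustrative sketch, not part of the original file: the entry-count
 * arithmetic used by transport_generic_get_mem() above.
 */
#if 0
u32 length = 9216;      /* hypothetical 9 KiB payload */
unsigned int nents = DIV_ROUND_UP(length, PAGE_SIZE);
/* -> 3 entries on 4 KiB pages: two full pages plus one 1 KiB tail */
#endif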
3481
3482 /* Reduce sectors if they are too long for the device */
3483 static inline sector_t transport_limit_task_sectors(
3484         struct se_device *dev,
3485         unsigned long long lba,
3486         sector_t sectors)
3487 {
3488         sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
3489
3490         if (dev->transport->get_device_type(dev) == TYPE_DISK)
3491                 if ((lba + sectors) > transport_dev_end_lba(dev))
3492                         sectors = ((transport_dev_end_lba(dev) - lba) + 1);
3493
3494         return sectors;
3495 }
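
/*
 * Editor's illustrative sketch, not part of the original file: worked numbers
 * for the clamping above on a hypothetical 8-sector TYPE_DISK device
 * (end LBA 7) with max_sectors = 1024.
 */
#if 0
unsigned long long lba = 4;
sector_t sectors = min_t(sector_t, 10, 1024);   /* max_sectors cap: 10 */

if (lba + sectors > 7)
        sectors = (7 - lba) + 1;                /* clamp to 4 sectors */
#endif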
3496
3497
3498 /*
3499  * This function can be used by HW target mode drivers to create a linked
3500  * scatterlist from all contiguously allocated struct se_task->task_sg[].
3501  * This is intended to be called during the completion path by TCM Core
3502  * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
3503  */
3504 void transport_do_task_sg_chain(struct se_cmd *cmd)
3505 {
3506         struct scatterlist *sg_first = NULL;
3507         struct scatterlist *sg_prev = NULL;
3508         int sg_prev_nents = 0;
3509         struct scatterlist *sg;
3510         struct se_task *task;
3511         u32 chained_nents = 0;
3512         int i;
3513
3514         BUG_ON(!cmd->se_tfo->task_sg_chaining);
3515
3516         /*
3517          * Walk the struct se_task list and set up scatterlist chains
3518          * for each contiguously allocated struct se_task->task_sg[].
3519          */
3520         list_for_each_entry(task, &cmd->t_task_list, t_list) {
3521                 if (!task->task_sg)
3522                         continue;
3523
3524                 if (!sg_first) {
3525                         sg_first = task->task_sg;
3526                         chained_nents = task->task_sg_nents;
3527                 } else {
3528                         sg_chain(sg_prev, sg_prev_nents, task->task_sg);
3529                         chained_nents += task->task_sg_nents;
3530                 }
3531                 /*
3532                  * For the padded tasks, use the extra SGL vector allocated
3533                  * in transport_allocate_data_tasks() for the sg_prev_nents
3534                  * offset into sg_chain() above.
3535                  *
3536                  * We do not need the padding for the last task (or a single
3537                  * task), but in that case we will never use the sg_prev_nents
3538                  * value below which would be incorrect.
3539                  */
3540                 sg_prev_nents = (task->task_sg_nents + 1);
3541                 sg_prev = task->task_sg;
3542         }
3543         /*
3544          * Set up the starting pointer and total t_tasks_sg_linked_no including
3545          * padding SGs for linking and to mark the end.
3546          */
3547         cmd->t_tasks_sg_chained = sg_first;
3548         cmd->t_tasks_sg_chained_no = chained_nents;
3549
3550         pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
3551                 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
3552                 cmd->t_tasks_sg_chained_no);
3553
3554         for_each_sg(cmd->t_tasks_sg_chained, sg,
3555                         cmd->t_tasks_sg_chained_no, i) {
3556
3557                 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
3558                         i, sg, sg_page(sg), sg->length, sg->offset);
3559                 if (sg_is_chain(sg))
3560                         pr_debug("SG: %p sg_is_chain=1\n", sg);
3561                 if (sg_is_last(sg))
3562                         pr_debug("SG: %p sg_is_last=1\n", sg);
3563         }
3564 }
3565 EXPORT_SYMBOL(transport_do_task_sg_chain);
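
/*
 * Editor's illustrative sketch, not part of the original file: chaining two
 * per-task tables as done above.  Table A is padded to n_a + 1 entries; the
 * extra entry carries the chain link rather than data.
 */
#if 0
sg_chain(sg_a, n_a + 1, sg_b);  /* sg_a, n_a, sg_b are hypothetical */
#endif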
3566
3567 /*
3568  * Break up cmd into chunks transport can handle
3569  */
3570 static int
3571 transport_allocate_data_tasks(struct se_cmd *cmd,
3572         enum dma_data_direction data_direction,
3573         struct scatterlist *cmd_sg, unsigned int sgl_nents)
3574 {
3575         struct se_device *dev = cmd->se_dev;
3576         int task_count, i;
3577         unsigned long long lba;
3578         sector_t sectors, dev_max_sectors;
3579         u32 sector_size;
3580
3581         if (transport_cmd_get_valid_sectors(cmd) < 0)
3582                 return -EINVAL;
3583
3584         dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
3585         sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
3586
3587         WARN_ON(cmd->data_length % sector_size);
3588
3589         lba = cmd->t_task_lba;
3590         sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
3591         task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
3592
3593         /*
3594          * If we need just a single task reuse the SG list in the command
3595          * and avoid a lot of work.
3596          */
3597         if (task_count == 1) {
3598                 struct se_task *task;
3599                 unsigned long flags;
3600
3601                 task = transport_generic_get_task(cmd, data_direction);
3602                 if (!task)
3603                         return -ENOMEM;
3604
3605                 task->task_sg = cmd_sg;
3606                 task->task_sg_nents = sgl_nents;
3607
3608                 task->task_lba = lba;
3609                 task->task_sectors = sectors;
3610                 task->task_size = task->task_sectors * sector_size;
3611
3612                 spin_lock_irqsave(&cmd->t_state_lock, flags);
3613                 list_add_tail(&task->t_list, &cmd->t_task_list);
3614                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3615
3616                 return task_count;
3617         }
3618
3619         for (i = 0; i < task_count; i++) {
3620                 struct se_task *task;
3621                 unsigned int task_size, task_sg_nents_padded;
3622                 struct scatterlist *sg;
3623                 unsigned long flags;
3624                 int count;
3625
3626                 task = transport_generic_get_task(cmd, data_direction);
3627                 if (!task)
3628                         return -ENOMEM;
3629
3630                 task->task_lba = lba;
3631                 task->task_sectors = min(sectors, dev_max_sectors);
3632                 task->task_size = task->task_sectors * sector_size;
3633
3634                 /*
3635                  * This now assumes that passed sg_ents are in PAGE_SIZE chunks
3636                  * in order to calculate the number per task SGL entries
3637                  */
3638                 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
3639                 /*
3640                  * Check if the fabric module driver is requesting that all
3641                  * struct se_task->task_sg[] be chained together.  If so,
3642                  * then allocate an extra padding SG entry for linking and
3643                  * marking the end of the chained SGL for every task except
3644                  * the last one for (task_count > 1) operation, or skipping
3645                  * the extra padding for the (task_count == 1) case.
3646                  */
3647                 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
3648                         task_sg_nents_padded = (task->task_sg_nents + 1);
3649                 } else
3650                         task_sg_nents_padded = task->task_sg_nents;
3651
3652                 task->task_sg = kmalloc(sizeof(struct scatterlist) *
3653                                         task_sg_nents_padded, GFP_KERNEL);
3654                 if (!task->task_sg) {
3655                         cmd->se_dev->transport->free_task(task);
3656                         return -ENOMEM;
3657                 }
3658
3659                 sg_init_table(task->task_sg, task_sg_nents_padded);
3660
3661                 task_size = task->task_size;
3662
3663                 /* Build new sgl, only up to task_size */
3664                 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
3665                         if (cmd_sg->length > task_size)
3666                                 break;
3667
3668                         *sg = *cmd_sg;
3669                         task_size -= cmd_sg->length;
3670                         cmd_sg = sg_next(cmd_sg);
3671                 }
3672
3673                 lba += task->task_sectors;
3674                 sectors -= task->task_sectors;
3675
3676                 spin_lock_irqsave(&cmd->t_state_lock, flags);
3677                 list_add_tail(&task->t_list, &cmd->t_task_list);
3678                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3679         }
3680
3681         return task_count;
3682 }
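
/*
 * Editor's illustrative sketch, not part of the original file: the split
 * arithmetic above for a hypothetical 1 MiB command on a 512-byte
 * block_size device with max_sectors = 1024.
 */
#if 0
u32 data_length = 1048576, block_size = 512;
sector_t sectors = DIV_ROUND_UP(data_length, block_size);       /* 2048 */
int task_count = DIV_ROUND_UP_SECTOR_T(sectors, 1024);          /* 2 tasks */
#endif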
3683
3684 static int
3685 transport_allocate_control_task(struct se_cmd *cmd)
3686 {
3687         struct se_task *task;
3688         unsigned long flags;
3689
3690         task = transport_generic_get_task(cmd, cmd->data_direction);
3691         if (!task)
3692                 return -ENOMEM;
3693
3694         task->task_sg = cmd->t_data_sg;
3695         task->task_size = cmd->data_length;
3696         task->task_sg_nents = cmd->t_data_nents;
3697
3698         spin_lock_irqsave(&cmd->t_state_lock, flags);
3699         list_add_tail(&task->t_list, &cmd->t_task_list);
3700         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3701
3702         /* Success! Return number of tasks allocated */
3703         return 1;
3704 }
3705
3706 /*
3707  * Allocate any required resources to execute the command and place it on
3708  * the execution queue if possible.  For writes we might not have the
3709  * payload yet, so we notify the fabric via a call to ->write_pending instead.
3710  */
3711 int transport_generic_new_cmd(struct se_cmd *cmd)
3712 {
3713         struct se_device *dev = cmd->se_dev;
3714         int task_cdbs, task_cdbs_bidi = 0;
3715         int set_counts = 1;
3716         int ret = 0;
3717
3718         /*
3719          * Determine if the TCM fabric module has already allocated physical
3720          * memory, and is directly calling transport_generic_map_mem_to_cmd()
3721          * beforehand.
3722          */
3723         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
3724             cmd->data_length) {
3725                 ret = transport_generic_get_mem(cmd);
3726                 if (ret < 0)
3727                         goto out_fail;
3728         }
3729
3730         /*
3731          * For BIDI command set up the read tasks first.
3732          */
3733         if (cmd->t_bidi_data_sg &&
3734             dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
3735                 BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
3736
3737                 task_cdbs_bidi = transport_allocate_data_tasks(cmd,
3738                                 DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
3739                                 cmd->t_bidi_data_nents);
3740                 if (task_cdbs_bidi <= 0)
3741                         goto out_fail;
3742
3743                 atomic_inc(&cmd->t_fe_count);
3744                 atomic_inc(&cmd->t_se_count);
3745                 set_counts = 0;
3746         }
3747
3748         if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3749                 task_cdbs = transport_allocate_data_tasks(cmd,
3750                                         cmd->data_direction, cmd->t_data_sg,
3751                                         cmd->t_data_nents);
3752         } else {
3753                 task_cdbs = transport_allocate_control_task(cmd);
3754         }
3755
3756         if (task_cdbs <= 0)
3757                 goto out_fail;
3758
3759         if (set_counts) {
3760                 atomic_inc(&cmd->t_fe_count);
3761                 atomic_inc(&cmd->t_se_count);
3762         }
3763
3764         cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
3765         atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
3766         atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
3767
3768         /*
3769          * For WRITEs, let the fabric know its buffer is ready.
3770          * This WRITE struct se_cmd (and all of its associated struct se_task's)
3771          * will be added to the struct se_device execution queue after its WRITE
3772          * data has arrived (i.e. it gets handled by the transport processing
3773          * thread a second time).
3774          */
3775         if (cmd->data_direction == DMA_TO_DEVICE) {
3776                 transport_add_tasks_to_state_queue(cmd);
3777                 return transport_generic_write_pending(cmd);
3778         }
3779         /*
3780          * Everything else but a WRITE, add the struct se_cmd's struct se_task's
3781          * to the execution queue.
3782          */
3783         transport_execute_tasks(cmd);
3784         return 0;
3785
3786 out_fail:
3787         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3788         cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3789         return -EINVAL;
3790 }
3791 EXPORT_SYMBOL(transport_generic_new_cmd);
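
/*
 * Editor's illustrative sketch, not part of the original file: fabric-side
 * ordering for a WRITE.  transport_generic_new_cmd() returns via
 * ->write_pending(); once the payload has arrived the fabric re-enters the
 * core through transport_generic_process_write() below.
 */
#if 0
rc = transport_generic_new_cmd(se_cmd);
/* ... WRITE payload received from the initiator ... */
transport_generic_process_write(se_cmd);
#endif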
3792
3793 /*      transport_generic_process_write():
3794  *      Called by the fabric once a WRITE payload has arrived, to add
3795  *      the command's tasks to the execution queue.
3796  */
3797 void transport_generic_process_write(struct se_cmd *cmd)
3798 {
3799         transport_execute_tasks(cmd);
3800 }
3801 EXPORT_SYMBOL(transport_generic_process_write);
3802
3803 static void transport_write_pending_qf(struct se_cmd *cmd)
3804 {
3805         int ret;
3806
3807         ret = cmd->se_tfo->write_pending(cmd);
3808         if (ret == -EAGAIN || ret == -ENOMEM) {
3809                 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
3810                          cmd);
3811                 transport_handle_queue_full(cmd, cmd->se_dev);
3812         }
3813 }
3814
3815 static int transport_generic_write_pending(struct se_cmd *cmd)
3816 {
3817         unsigned long flags;
3818         int ret;
3819
3820         spin_lock_irqsave(&cmd->t_state_lock, flags);
3821         cmd->t_state = TRANSPORT_WRITE_PENDING;
3822         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3823
3824         /*
3825          * Clear the se_cmd for WRITE_PENDING status in order to set
3826          * cmd->t_transport_active=0 so that transport_generic_handle_data
3827          * can be called from HW target mode interrupt code.  This is safe
3828          * to call with transport_off=1 before the cmd->se_tfo->write_pending
3829          * because the se_cmd->se_lun pointer is not being cleared.
3830          */
3831         transport_cmd_check_stop(cmd, 1, 0);
3832
3833         /*
3834          * Call the fabric write_pending function here to let the
3835          * frontend know that WRITE buffers are ready.
3836          */
3837         ret = cmd->se_tfo->write_pending(cmd);
3838         if (ret == -EAGAIN || ret == -ENOMEM)
3839                 goto queue_full;
3840         else if (ret < 0)
3841                 return ret;
3842
3843         return 1;
3844
3845 queue_full:
3846         pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
3847         cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
3848         transport_handle_queue_full(cmd, cmd->se_dev);
3849         return 0;
3850 }
3851
3852 /**
3853  * transport_release_cmd - free a command
3854  * @cmd:       command to free
3855  *
3856  * This routine unconditionally frees a command, and reference counting
3857  * or list removal must be done in the caller.
3858  */
3859 void transport_release_cmd(struct se_cmd *cmd)
3860 {
3861         BUG_ON(!cmd->se_tfo);
3862
3863         if (cmd->se_tmr_req)
3864                 core_tmr_release_req(cmd->se_tmr_req);
3865         if (cmd->t_task_cdb != cmd->__t_task_cdb)
3866                 kfree(cmd->t_task_cdb);
3867         /*
3868          * Check if target_wait_for_sess_cmds() is expecting to
3869          * release se_cmd directly here..
3870          */
3871         if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
3872                 if (cmd->se_tfo->check_release_cmd(cmd) != 0)
3873                         return;
3874
3875         cmd->se_tfo->release_cmd(cmd);
3876 }
3877 EXPORT_SYMBOL(transport_release_cmd);
3878
3879 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3880 {
3881         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
3882                 if (wait_for_tasks && cmd->se_tmr_req)
3883                          transport_wait_for_tasks(cmd);
3884
3885                 transport_release_cmd(cmd);
3886         } else {
3887                 if (wait_for_tasks)
3888                         transport_wait_for_tasks(cmd);
3889
3890                 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
3891
3892                 if (cmd->se_lun)
3893                         transport_lun_remove_cmd(cmd);
3894
3895                 transport_free_dev_tasks(cmd);
3896
3897                 transport_put_cmd(cmd);
3898         }
3899 }
3900 EXPORT_SYMBOL(transport_generic_free_cmd);
3901
3902 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
3903  * @se_sess:    session to reference
3904  * @se_cmd:     command descriptor to add
3905  */
3906 void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3907 {
3908         unsigned long flags;
3909
3910         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3911         list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
3912         se_cmd->check_release = 1;
3913         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3914 }
3915 EXPORT_SYMBOL(target_get_sess_cmd);
3916
3917 /* target_put_sess_cmd - Check for active I/O shutdown or list delete
3918  * @se_sess:    session to reference
3919  * @se_cmd:     command descriptor to drop
3920  */
3921 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3922 {
3923         unsigned long flags;
3924
3925         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3926         if (list_empty(&se_cmd->se_cmd_list)) {
3927                 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3928                 WARN_ON(1);
3929                 return 0;
3930         }
3931
3932         if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
3933                 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3934                 complete(&se_cmd->cmd_wait_comp);
3935                 return 1;
3936         }
3937         list_del(&se_cmd->se_cmd_list);
3938         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3939
3940         return 0;
3941 }
3942 EXPORT_SYMBOL(target_put_sess_cmd);
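
/*
 * Editor's illustrative sketch, not part of the original file: typical se_cmd
 * session tracking.  The fabric registers the descriptor when the command is
 * accepted and drops it at release; during teardown
 * target_splice_sess_cmd_list() plus target_wait_for_sess_cmds() drain the
 * outstanding list.
 */
#if 0
target_get_sess_cmd(se_sess, se_cmd);
/* ... command executes and completes ... */
target_put_sess_cmd(se_sess, se_cmd);
#endif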
3943
3944 /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
3945  * @se_sess:    session to split
3946  */
3947 void target_splice_sess_cmd_list(struct se_session *se_sess)
3948 {
3949         struct se_cmd *se_cmd;
3950         unsigned long flags;
3951
3952         WARN_ON(!list_empty(&se_sess->sess_wait_list));
3953         INIT_LIST_HEAD(&se_sess->sess_wait_list);
3954
3955         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3956         se_sess->sess_tearing_down = 1;
3957
3958         list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
3959
3960         list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
3961                 se_cmd->cmd_wait_set = 1;
3962
3963         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3964 }
3965 EXPORT_SYMBOL(target_splice_sess_cmd_list);
3966
3967 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
3968  * @se_sess:    session to wait for active I/O
3969  * @wait_for_tasks:     Make extra transport_wait_for_tasks call
3970  */
3971 void target_wait_for_sess_cmds(
3972         struct se_session *se_sess,
3973         int wait_for_tasks)
3974 {
3975         struct se_cmd *se_cmd, *tmp_cmd;
3976         bool rc = false;
3977
3978         list_for_each_entry_safe(se_cmd, tmp_cmd,
3979                                 &se_sess->sess_wait_list, se_cmd_list) {
3980                 list_del(&se_cmd->se_cmd_list);
3981
3982                 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
3983                         " %d\n", se_cmd, se_cmd->t_state,
3984                         se_cmd->se_tfo->get_cmd_state(se_cmd));
3985
3986                 if (wait_for_tasks) {
3987                         pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
3988                                 " fabric state: %d\n", se_cmd, se_cmd->t_state,
3989                                 se_cmd->se_tfo->get_cmd_state(se_cmd));
3990
3991                         rc = transport_wait_for_tasks(se_cmd);
3992
3993                         pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
3994                                 " fabric state: %d\n", se_cmd, se_cmd->t_state,
3995                                 se_cmd->se_tfo->get_cmd_state(se_cmd));
3996                 }
3997
3998                 if (!rc) {
3999                         wait_for_completion(&se_cmd->cmd_wait_comp);
4000                         pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
4001                                 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4002                                 se_cmd->se_tfo->get_cmd_state(se_cmd));
4003                 }
4004
4005                 se_cmd->se_tfo->release_cmd(se_cmd);
4006         }
4007 }
4008 EXPORT_SYMBOL(target_wait_for_sess_cmds);
4009
4010 /*      transport_lun_wait_for_tasks():
4011  *
4012  *      Called from ConfigFS context to stop the passed struct se_cmd to allow
4013  *      a struct se_lun to be successfully shut down.
4014  */
4015 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4016 {
4017         unsigned long flags;
4018         int ret;
4019         /*
4020          * If the frontend has already requested this struct se_cmd to
4021          * be stopped, we can safely ignore this struct se_cmd.
4022          */
4023         spin_lock_irqsave(&cmd->t_state_lock, flags);
4024         if (atomic_read(&cmd->t_transport_stop)) {
4025                 atomic_set(&cmd->transport_lun_stop, 0);
4026                 pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
4027                         " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
4028                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4029                 transport_cmd_check_stop(cmd, 1, 0);
4030                 return -EPERM;
4031         }
4032         atomic_set(&cmd->transport_lun_fe_stop, 1);
4033         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4034
4035         wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4036
4037         ret = transport_stop_tasks_for_cmd(cmd);
4038
4039         pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4040                         " %d\n", cmd, cmd->t_task_list_num, ret);
4041         if (!ret) {
4042                 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4043                                 cmd->se_tfo->get_task_tag(cmd));
4044                 wait_for_completion(&cmd->transport_lun_stop_comp);
4045                 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
4046                                 cmd->se_tfo->get_task_tag(cmd));
4047         }
4048         transport_remove_cmd_from_queue(cmd);
4049
4050         return 0;
4051 }
4052
4053 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4054 {
4055         struct se_cmd *cmd = NULL;
4056         unsigned long lun_flags, cmd_flags;
4057         /*
4058          * Do exception processing and return CHECK_CONDITION status to the
4059          * Initiator Port.
4060          */
4061         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4062         while (!list_empty(&lun->lun_cmd_list)) {
4063                 cmd = list_first_entry(&lun->lun_cmd_list,
4064                        struct se_cmd, se_lun_node);
4065                 list_del(&cmd->se_lun_node);
4066
4067                 atomic_set(&cmd->transport_lun_active, 0);
4068                 /*
4069                  * This will notify iscsi_target_transport.c:
4070                  * transport_cmd_check_stop() that a LUN shutdown is in
4071                  * progress for the iscsi_cmd_t.
4072                  */
4073                 spin_lock(&cmd->t_state_lock);
4074                 pr_debug("SE_LUN[%d] - Setting cmd->transport"
4075                         "_lun_stop for ITT: 0x%08x\n",
4076                         cmd->se_lun->unpacked_lun,
4077                         cmd->se_tfo->get_task_tag(cmd));
4078                 atomic_set(&cmd->transport_lun_stop, 1);
4079                 spin_unlock(&cmd->t_state_lock);
4080
4081                 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4082
4083                 if (!cmd->se_lun) {
4084                         pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
4085                                 cmd->se_tfo->get_task_tag(cmd),
4086                                 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4087                         BUG();
4088                 }
4089                 /*
4090                  * If the Storage engine still owns the iscsi_cmd_t, determine
4091                  * and/or stop its context.
4092                  */
4093                 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
4094                         "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4095                         cmd->se_tfo->get_task_tag(cmd));
4096
4097                 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
4098                         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4099                         continue;
4100                 }
4101
4102                 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
4103                         "_wait_for_tasks(): SUCCESS\n",
4104                         cmd->se_lun->unpacked_lun,
4105                         cmd->se_tfo->get_task_tag(cmd));
4106
4107                 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4108                 if (!atomic_read(&cmd->transport_dev_active)) {
4109                         spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4110                         goto check_cond;
4111                 }
4112                 atomic_set(&cmd->transport_dev_active, 0);
4113                 transport_all_task_dev_remove_state(cmd);
4114                 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4115
4116                 transport_free_dev_tasks(cmd);
4117                 /*
4118                  * The Storage engine stopped this struct se_cmd before it was
4119                  * sent to the fabric frontend for delivery back to the
4120                  * Initiator Node.  Return this SCSI CDB back with a
4121                  * CHECK_CONDITION status.
4122                  */
4123 check_cond:
4124                 transport_send_check_condition_and_sense(cmd,
4125                                 TCM_NON_EXISTENT_LUN, 0);
4126                 /*
4127                  * If the fabric frontend is waiting for this iscsi_cmd_t to
4128                  * be released, notify the waiting thread now that the LU has
4129                  * finished accessing it.
4130                  */
4131                 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4132                 if (atomic_read(&cmd->transport_lun_fe_stop)) {
4133                         pr_debug("SE_LUN[%d] - Detected FE stop for"
4134                                 " struct se_cmd: %p ITT: 0x%08x\n",
4135                                 lun->unpacked_lun,
4136                                 cmd, cmd->se_tfo->get_task_tag(cmd));
4137
4138                         spin_unlock_irqrestore(&cmd->t_state_lock,
4139                                         cmd_flags);
4140                         transport_cmd_check_stop(cmd, 1, 0);
4141                         complete(&cmd->transport_lun_fe_stop_comp);
4142                         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4143                         continue;
4144                 }
4145                 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
4146                         lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
4147
4148                 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4149                 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4150         }
4151         spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4152 }
4153
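/*
 * kthread entry point used by transport_clear_lun_from_sessions() below:
 * walk all active commands still referencing this LUN, then signal the
 * waiting caller via lun->lun_shutdown_comp.
 */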
4154 static int transport_clear_lun_thread(void *p)
4155 {
4156         struct se_lun *lun = (struct se_lun *)p;
4157
4158         __transport_clear_lun_from_sessions(lun);
4159         complete(&lun->lun_shutdown_comp);
4160
4161         return 0;
4162 }
4163
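/**
 * transport_clear_lun_from_sessions - quiesce all commands for a LUN
 * @lun:        the struct se_lun being shut down
 *
 * Spawns a clear_lun kthread for @lun and blocks until the kthread
 * completes lun->lun_shutdown_comp.  Returns 0 on success, or a
 * PTR_ERR value if the kthread could not be started.
 */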
4164 int transport_clear_lun_from_sessions(struct se_lun *lun)
4165 {
4166         struct task_struct *kt;
4167
4168         kt = kthread_run(transport_clear_lun_thread, lun,
4169                         "tcm_cl_%u", lun->unpacked_lun);
4170         if (IS_ERR(kt)) {
4171                 pr_err("Unable to start clear_lun thread\n");
4172                 return PTR_ERR(kt);
4173         }
4174         wait_for_completion(&lun->lun_shutdown_comp);
4175
4176         return 0;
4177 }
4178
4179 /**
4180  * transport_wait_for_tasks - wait for completion to occur
4181  * @cmd:        command to wait on
4182  *
4183  * Called from frontend fabric context to wait for storage engine
4184  * to pause and/or release frontend generated struct se_cmd.
4185  */
4186 bool transport_wait_for_tasks(struct se_cmd *cmd)
4187 {
4188         unsigned long flags;
4189
4190         spin_lock_irqsave(&cmd->t_state_lock, flags);
4191         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
4192                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4193                 return false;
4194         }
4195         /*
4196          * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
4197          * has been set in transport_set_supported_SAM_opcode().
4198          */
4199         if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
4200                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4201                 return false;
4202         }
4203         /*
4204          * If we are already stopped due to an external event (i.e. LUN
4205          * shutdown), sleep until the connection can have the passed struct
4206          * se_cmd back.  cmd->transport_lun_fe_stop_comp will be completed
4207          * by __transport_clear_lun_from_sessions() once the ConfigFS
4208          * initiated LUN shutdown has finished with the struct se_cmd.
4209          */
4210         if (atomic_read(&cmd->transport_lun_stop)) {
4211
4212                 pr_debug("wait_for_tasks: Stopping"
4213                         " wait_for_completion(&cmd->transport_lun_fe"
4214                         "_stop_comp) for ITT: 0x%08x\n",
4215                         cmd->se_tfo->get_task_tag(cmd));
4216                 /*
4217                  * There is a special case for WRITES where a FE exception +
4218                  * LUN shutdown means ConfigFS context is still sleeping on
4219                  * transport_lun_stop_comp in transport_lun_wait_for_tasks().
4220                  * We go ahead and complete transport_lun_stop_comp just to
4221                  * be sure here.
4222                  */
4223                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4224                 complete(&cmd->transport_lun_stop_comp);
4225                 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4226                 spin_lock_irqsave(&cmd->t_state_lock, flags);
4227
4228                 transport_all_task_dev_remove_state(cmd);
4229                 /*
4230                  * At this point the frontend that originated this struct
4231                  * se_cmd owns the structure again, and it can be released
4232                  * through normal means below.
4233                  */
4234                 pr_debug("wait_for_tasks: Stopped"
4235                         " wait_for_completion(&cmd->transport_lun_fe_"
4236                         "stop_comp) for ITT: 0x%08x\n",
4237                         cmd->se_tfo->get_task_tag(cmd));
4238
4239                 atomic_set(&cmd->transport_lun_stop, 0);
4240         }
4241         if (!atomic_read(&cmd->t_transport_active) ||
4242              atomic_read(&cmd->t_transport_aborted)) {
4243                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4244                 return false;
4245         }
4246
4247         atomic_set(&cmd->t_transport_stop, 1);
4248
4249         pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
4250                 " i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
4251                 cmd, cmd->se_tfo->get_task_tag(cmd),
4252                 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4253
4254         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4255
4256         wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4257
4258         wait_for_completion(&cmd->t_transport_stop_comp);
4259
4260         spin_lock_irqsave(&cmd->t_state_lock, flags);
4261         atomic_set(&cmd->t_transport_active, 0);
4262         atomic_set(&cmd->t_transport_stop, 0);
4263
4264         pr_debug("wait_for_tasks: Stopped wait_for_completion("
4265                 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
4266                 cmd->se_tfo->get_task_tag(cmd));
4267
4268         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4269
4270         return true;
4271 }
4272 EXPORT_SYMBOL(transport_wait_for_tasks);
4273
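/*
 * The next two helpers read and write the SCSI Additional Sense Code (ASC)
 * and Additional Sense Code Qualifier (ASCQ) values cached in struct se_cmd.
 */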
4274 static int transport_get_sense_codes(
4275         struct se_cmd *cmd,
4276         u8 *asc,
4277         u8 *ascq)
4278 {
4279         *asc = cmd->scsi_asc;
4280         *ascq = cmd->scsi_ascq;
4281
4282         return 0;
4283 }
4284
4285 static int transport_set_sense_codes(
4286         struct se_cmd *cmd,
4287         u8 asc,
4288         u8 ascq)
4289 {
4290         cmd->scsi_asc = asc;
4291         cmd->scsi_ascq = ascq;
4292
4293         return 0;
4294 }
4295
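/**
 * transport_send_check_condition_and_sense - queue CHECK_CONDITION status
 * @cmd:            command to complete
 * @reason:         TCM_* reason code selecting the SENSE KEY/ASC/ASCQ values
 * @from_transport: nonzero if sense data was already provided by the backend
 *
 * Builds SPC-3 fixed format sense data in cmd->sense_buffer for @reason
 * (unless @from_transport supplied it) and queues the status response to
 * the fabric.  Only the first caller per command builds sense data; any
 * later call returns 0 without touching the buffer.
 */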
4296 int transport_send_check_condition_and_sense(
4297         struct se_cmd *cmd,
4298         u8 reason,
4299         int from_transport)
4300 {
4301         unsigned char *buffer = cmd->sense_buffer;
4302         unsigned long flags;
4303         int offset;
4304         u8 asc = 0, ascq = 0;
4305
4306         spin_lock_irqsave(&cmd->t_state_lock, flags);
4307         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4308                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4309                 return 0;
4310         }
4311         cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
4312         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4313
4314         if (!reason && from_transport)
4315                 goto after_reason;
4316
4317         if (!from_transport)
4318                 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
4319         /*
4320          * Data Segment and SenseLength of the fabric response PDU.
4321          *
4322          * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
4323          * from include/scsi/scsi_cmnd.h
4324          */
4325         offset = cmd->se_tfo->set_fabric_sense_len(cmd,
4326                                 TRANSPORT_SENSE_BUFFER);
4327         /*
4328          * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
4329          * SENSE KEY values from include/scsi/scsi.h
4330          */
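        /*
         * Fixed format sense data layout per SPC-3 4.5.3, relative to the
         * fabric sense offset established above:
         *
         *   buffer[offset]                         RESPONSE CODE (0x70)
         *   buffer[offset + SPC_SENSE_KEY_OFFSET]  SENSE KEY
         *   buffer[offset + SPC_ASC_KEY_OFFSET]    ADDITIONAL SENSE CODE
         *   buffer[offset + SPC_ASCQ_KEY_OFFSET]   ADDITIONAL SENSE CODE
         *                                          QUALIFIER
         */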
4331         switch (reason) {
4332         case TCM_NON_EXISTENT_LUN:
4333                 /* CURRENT ERROR */
4334                 buffer[offset] = 0x70;
4335                 /* ILLEGAL REQUEST */
4336                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4337                 /* LOGICAL UNIT NOT SUPPORTED */
4338                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4339                 break;
4340         case TCM_UNSUPPORTED_SCSI_OPCODE:
4341         case TCM_SECTOR_COUNT_TOO_MANY:
4342                 /* CURRENT ERROR */
4343                 buffer[offset] = 0x70;
4344                 /* ILLEGAL REQUEST */
4345                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4346                 /* INVALID COMMAND OPERATION CODE */
4347                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
4348                 break;
4349         case TCM_UNKNOWN_MODE_PAGE:
4350                 /* CURRENT ERROR */
4351                 buffer[offset] = 0x70;
4352                 /* ILLEGAL REQUEST */
4353                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4354                 /* INVALID FIELD IN CDB */
4355                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4356                 break;
4357         case TCM_CHECK_CONDITION_ABORT_CMD:
4358                 /* CURRENT ERROR */
4359                 buffer[offset] = 0x70;
4360                 /* ABORTED COMMAND */
4361                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4362                 /* BUS DEVICE RESET FUNCTION OCCURRED */
4363                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
4364                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
4365                 break;
4366         case TCM_INCORRECT_AMOUNT_OF_DATA:
4367                 /* CURRENT ERROR */
4368                 buffer[offset] = 0x70;
4369                 /* ABORTED COMMAND */
4370                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4371                 /* WRITE ERROR */
4372                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4373                 /* NOT ENOUGH UNSOLICITED DATA */
4374                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
4375                 break;
4376         case TCM_INVALID_CDB_FIELD:
4377                 /* CURRENT ERROR */
4378                 buffer[offset] = 0x70;
4379                 /* ABORTED COMMAND */
4380                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4381                 /* INVALID FIELD IN CDB */
4382                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4383                 break;
4384         case TCM_INVALID_PARAMETER_LIST:
4385                 /* CURRENT ERROR */
4386                 buffer[offset] = 0x70;
4387                 /* ABORTED COMMAND */
4388                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4389                 /* INVALID FIELD IN PARAMETER LIST */
4390                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4391                 break;
4392         case TCM_UNEXPECTED_UNSOLICITED_DATA:
4393                 /* CURRENT ERROR */
4394                 buffer[offset] = 0x70;
4395                 /* ABORTED COMMAND */
4396                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4397                 /* WRITE ERROR */
4398                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4399                 /* UNEXPECTED_UNSOLICITED_DATA */
4400                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
4401                 break;
4402         case TCM_SERVICE_CRC_ERROR:
4403                 /* CURRENT ERROR */
4404                 buffer[offset] = 0x70;
4405                 /* ABORTED COMMAND */
4406                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4407                 /* PROTOCOL SERVICE CRC ERROR */
4408                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
4409                 /* N/A */
4410                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
4411                 break;
4412         case TCM_SNACK_REJECTED:
4413                 /* CURRENT ERROR */
4414                 buffer[offset] = 0x70;
4415                 /* ABORTED COMMAND */
4416                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4417                 /* READ ERROR */
4418                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
4419                 /* FAILED RETRANSMISSION REQUEST */
4420                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
4421                 break;
4422         case TCM_WRITE_PROTECTED:
4423                 /* CURRENT ERROR */
4424                 buffer[offset] = 0x70;
4425                 /* DATA PROTECT */
4426                 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4427                 /* WRITE PROTECTED */
4428                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
4429                 break;
4430         case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4431                 /* CURRENT ERROR */
4432                 buffer[offset] = 0x70;
4433                 /* UNIT ATTENTION */
4434                 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4435                 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
4436                 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4437                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4438                 break;
4439         case TCM_CHECK_CONDITION_NOT_READY:
4440                 /* CURRENT ERROR */
4441                 buffer[offset] = 0x70;
4442                 /* Not Ready */
4443                 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4444                 transport_get_sense_codes(cmd, &asc, &ascq);
4445                 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4446                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4447                 break;
4448         case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
4449         default:
4450                 /* CURRENT ERROR */
4451                 buffer[offset] = 0x70;
4452                 /* ILLEGAL REQUEST */
4453                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4454                 /* LOGICAL UNIT COMMUNICATION FAILURE (SPC-3 ASC 0x08) */
4455                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x08;
4456                 break;
4457         }
4458         /*
4459          * This code uses linux/include/scsi/scsi.h SAM status codes!
4460          */
4461         cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
4462         /*
4463          * Automatically padded, this value is encoded in the fabric's
4464          * data_length response PDU containing the SCSI defined sense data.
4465          */
4466         cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
4467
4468 after_reason:
4469         return cmd->se_tfo->queue_status(cmd);
4470 }
4471 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
4472
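/**
 * transport_check_aborted_status - emit delayed TASK_ABORTED status
 * @cmd:         command to check for a pending abort
 * @send_status: nonzero if a status response may be queued from this context
 *
 * Returns 1 if @cmd has been aborted, queueing the delayed
 * SAM_STAT_TASK_ABORTED status at most once; returns 0 otherwise.
 */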
4473 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4474 {
4475         int ret = 0;
4476
4477         if (atomic_read(&cmd->t_transport_aborted) != 0) {
4478                 if (!send_status ||
4479                      (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4480                         return 1;
4481 #if 0
4482                 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
4483                         " status for CDB: 0x%02x ITT: 0x%08x\n",
4484                         cmd->t_task_cdb[0],
4485                         cmd->se_tfo->get_task_tag(cmd));
4486 #endif
4487                 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
4488                 cmd->se_tfo->queue_status(cmd);
4489                 ret = 1;
4490         }
4491         return ret;
4492 }
4493 EXPORT_SYMBOL(transport_check_aborted_status);
4494
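/*
 * transport_send_task_abort - queue SAM_STAT_TASK_ABORTED back to the fabric
 * for an aborted command, deferring the response while incoming fabric
 * WRITE data is still expected.
 */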
4495 void transport_send_task_abort(struct se_cmd *cmd)
4496 {
4497         unsigned long flags;
4498
4499         spin_lock_irqsave(&cmd->t_state_lock, flags);
4500         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4501                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4502                 return;
4503         }
4504         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4505
4506         /*
4507          * If there are still expected incoming fabric WRITEs, we wait
4508          * until they have completed before sending a TASK_ABORTED
4509          * response.  This response with TASK_ABORTED status will be
4510          * queued back to the fabric module by transport_check_aborted_status().
4511          */
4512         if (cmd->data_direction == DMA_TO_DEVICE) {
4513                 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
4514                         atomic_inc(&cmd->t_transport_aborted);
4515                         smp_mb__after_atomic_inc();
4516                 }
4517         }
4518         cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4519 #if 0
4520         pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
4521                 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
4522                 cmd->se_tfo->get_task_tag(cmd));
4523 #endif
4524         cmd->se_tfo->queue_status(cmd);
4525 }
4526
4527 /*      transport_generic_do_tmr():
4528  *
4529  *      Execute the attached task management request and queue its response.
4530  */
4531 int transport_generic_do_tmr(struct se_cmd *cmd)
4532 {
4533         struct se_device *dev = cmd->se_dev;
4534         struct se_tmr_req *tmr = cmd->se_tmr_req;
4535         int ret;
4536
4537         switch (tmr->function) {
4538         case TMR_ABORT_TASK:
4539                 tmr->response = TMR_FUNCTION_REJECTED;
4540                 break;
4541         case TMR_ABORT_TASK_SET:
4542         case TMR_CLEAR_ACA:
4543         case TMR_CLEAR_TASK_SET:
4544                 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4545                 break;
4546         case TMR_LUN_RESET:
4547                 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4548                 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4549                                          TMR_FUNCTION_REJECTED;
4550                 break;
4551         case TMR_TARGET_WARM_RESET:
4552                 tmr->response = TMR_FUNCTION_REJECTED;
4553                 break;
4554         case TMR_TARGET_COLD_RESET:
4555                 tmr->response = TMR_FUNCTION_REJECTED;
4556                 break;
4557         default:
4558                 pr_err("Unknown TMR function: 0x%02x.\n",
4559                                 tmr->function);
4560                 tmr->response = TMR_FUNCTION_REJECTED;
4561                 break;
4562         }
4563
4564         cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
4565         cmd->se_tfo->queue_tm_rsp(cmd);
4566
4567         transport_cmd_check_stop_to_fabric(cmd);
4568         return 0;
4569 }
4570
4571 /*      transport_processing_thread():
4572  *
4573  *      Per-device kthread that dispatches queued commands by t_state.
4574  */
4575 static int transport_processing_thread(void *param)
4576 {
4577         int ret;
4578         struct se_cmd *cmd;
4579         struct se_device *dev = (struct se_device *) param;
4580
4581         while (!kthread_should_stop()) {
4582                 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4583                                 atomic_read(&dev->dev_queue_obj.queue_cnt) ||
4584                                 kthread_should_stop());
4585                 if (ret < 0)
4586                         goto out;
4587
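                /*
                 * First kick __transport_execute_tasks() to run the device's
                 * queued tasks, then dequeue and dispatch the next struct
                 * se_cmd based on its t_state.
                 */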
4588 get_cmd:
4589                 __transport_execute_tasks(dev);
4590
4591                 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4592                 if (!cmd)
4593                         continue;
4594
4595                 switch (cmd->t_state) {
4596                 case TRANSPORT_NEW_CMD:
4597                         BUG();
4598                         break;
4599                 case TRANSPORT_NEW_CMD_MAP:
4600                         if (!cmd->se_tfo->new_cmd_map) {
4601                                 pr_err("cmd->se_tfo->new_cmd_map is"
4602                                         " NULL for TRANSPORT_NEW_CMD_MAP\n");
4603                                 BUG();
4604                         }
4605                         ret = cmd->se_tfo->new_cmd_map(cmd);
4606                         if (ret < 0) {
4607                                 transport_generic_request_failure(cmd);
4608                                 break;
4609                         }
4610                         ret = transport_generic_new_cmd(cmd);
4611                         if (ret < 0) {
4612                                 transport_generic_request_failure(cmd);
4613                                 break;
4614                         }
4615                         break;
4616                 case TRANSPORT_PROCESS_WRITE:
4617                         transport_generic_process_write(cmd);
4618                         break;
4619                 case TRANSPORT_PROCESS_TMR:
4620                         transport_generic_do_tmr(cmd);
4621                         break;
4622                 case TRANSPORT_COMPLETE_QF_WP:
4623                         transport_write_pending_qf(cmd);
4624                         break;
4625                 case TRANSPORT_COMPLETE_QF_OK:
4626                         transport_complete_qf(cmd);
4627                         break;
4628                 default:
4629                         pr_err("Unknown t_state: %d for ITT: 0x%08x "
4630                                 "i_state: %d on SE LUN: %u\n",
4631                                 cmd->t_state,
4632                                 cmd->se_tfo->get_task_tag(cmd),
4633                                 cmd->se_tfo->get_cmd_state(cmd),
4634                                 cmd->se_lun->unpacked_lun);
4635                         BUG();
4636                 }
4637
4638                 goto get_cmd;
4639         }
4640
4641 out:
4642         WARN_ON(!list_empty(&dev->state_task_list));
4643         WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
4644         dev->process_thread = NULL;
4645         return 0;
4646 }