/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled when target mode is enabled, and enabled again "
	"when target mode is disabled; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;
/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0	/* simple task attribute */
#define FCP_PTA_HEADQ       1	/* head of queue task attribute */
#define FCP_PTA_ORDERED     2	/* ordered task attribute */
#define FCP_PTA_ACA         4	/* auto. contingent allegiance */
#define FCP_PTA_MASK        7	/* mask for task attribute field */
#define FCP_PRI_SHIFT       3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80	/* reserved bits in priority field */
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under the HW lock and could unlock/lock it inside.
 * This isn't an issue, since in the current implementation, at the time
 * those functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);

static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}
/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
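/*
 * Illustrative sketch (not part of the driver): a 24-bit FC port ID splits
 * into domain/area/al_pa bytes, and the al_pa byte is what indexes the vp
 * map in qlt_find_host_by_d_id() above. Names prefixed "example_" are
 * hypothetical.
 */
static inline void example_split_port_id(uint32_t port_id,
	uint8_t *domain, uint8_t *area, uint8_t *al_pa)
{
	*domain = (port_id >> 16) & 0xff;	/* d_id[0] on the wire */
	*area   = (port_id >> 8) & 0xff;	/* d_id[1] */
	*al_pa  = port_id & 0xff;		/* d_id[2], vp map index */
}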
static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
		vha->hw->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}
}
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against race, when tgt is freed before or
	 * during this unregistration.
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}
/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0	/* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess)
		return -ESRCH;

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
	    sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}
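/*
 * Illustrative sketch (not part of the driver): walking a firmware ID list
 * whose per-entry stride is only known at runtime (ha->gid_list_info_size
 * above), by stepping a byte pointer. Names prefixed "example_" are
 * hypothetical.
 */
struct example_gid {
	uint8_t al_pa, area, domain;
	uint16_t loop_id;
};

static int example_find_loop_id(const char *list, int entries, size_t stride,
	const uint8_t *s_id, uint16_t *loop_id)
{
	int i;

	for (i = 0; i < entries; i++, list += stride) {
		const struct example_gid *gid = (const void *)list;

		if (gid->al_pa == s_id[2] && gid->area == s_id[1] &&
		    gid->domain == s_id[0]) {
			*loop_id = gid->loop_id;
			return 0;	/* found */
		}
	}
	return -1;			/* no matching port ID */
}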
/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}
static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
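/*
 * Illustrative sketch (not part of the driver): the delayed-deletion timing
 * used above. A session expires dev_loss_tmo seconds after being scheduled;
 * the work function compares jiffies against sess->expires and re-arms
 * itself for the remaining delta. "example_" names are hypothetical.
 */
static inline unsigned long example_deletion_deadline(unsigned long now_jiffies,
	unsigned int port_down_retry_count)
{
	unsigned int dev_loss_tmo = port_down_retry_count + 5;	/* seconds */

	return now_jiffies + dev_loss_tmo * HZ;	/* value for sess->expires */
}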
/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the
 * list. Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
				sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
						(fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);

		return NULL;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
	vha->vha_tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}
/*
 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	if (qla_ini_mode_enabled(vha))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
	} else {
		kref_get(&sess->se_sess->sess_kref);

		if (sess->deleted) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
			    "qla_target(%u): %ssession for port %8phC "
			    "(loop ID %d) reappeared\n", vha->vp_idx,
			    sess->local ? "local " : "", sess->port_name,
			    sess->loop_id);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
			    "Reappeared sess %p\n", sess);
		}
		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
					(fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * during this check.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}
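/*
 * Illustrative sketch (not part of the driver): when answering an exchange,
 * the source and destination port IDs of the received frame swap roles, as
 * in the !ids_reversed branch above. "example_" names are hypothetical.
 */
static inline void example_swap_ids(const uint8_t rx_s_id[3],
	const uint8_t rx_d_id[3], uint8_t tx_s_id[3], uint8_t tx_d_id[3])
{
	int i;

	for (i = 0; i < 3; i++) {
		tx_d_id[i] = rx_s_id[i];	/* reply goes back to the sender */
		tx_s_id[i] = rx_d_id[i];	/* and originates from us */
	}
}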
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to the ABTS response we
	 * generated, so the ID fields in it are reversed.
	 */
	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
	    CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (se_cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = vha->hw->chip_reset;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}
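/*
 * Illustrative sketch (not part of the driver): the little-endian ABTS
 * header stores the 3-byte port ID in reversed byte order, so the lookup
 * key has to be flipped before calling ->find_sess_by_s_id(), as done
 * above. "example_" names are hypothetical.
 */
static inline void example_sid_from_le(const uint8_t le_s_id[3],
	uint8_t be_s_id[3])
{
	be_s_id[0] = le_s_id[2];	/* domain */
	be_s_id[1] = le_s_id[1];	/* area */
	be_s_id[2] = le_s_id[0];	/* al_pa */
}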
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(ha, ha->req);
}
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
		/*
		 * Either a chip reset is active or this request was from
		 * a previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
			"RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
			qla2x00_reset_active(vha), mcmd->reset_count,
			ha->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here.
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			prm->tgt->datasegs_per_cmd,
			prm->tgt->datasegs_per_cont);
	} else {
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
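/*
 * Illustrative sketch (not part of the driver): the request-entry math used
 * above. The first IOCB carries up to datasegs_per_cmd data segments; every
 * additional datasegs_per_cont segments cost one continuation entry.
 * "example_" names are hypothetical.
 */
static inline uint32_t example_req_cnt(uint32_t seg_cnt,
	uint32_t datasegs_per_cmd, uint32_t datasegs_per_cont)
{
	uint32_t req_cnt = 1;	/* the CTIO itself */

	if (seg_cnt > datasegs_per_cmd)
		req_cnt += DIV_ROUND_UP(seg_cnt - datasegs_per_cmd,
		    datasegs_per_cont);
	return req_cnt;
}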
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	if (!cmd->sg_mapped)
		return;

	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, NULL, cmd);

	if (cmd->ctx)
		dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	uint32_t cnt, cnt_in;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
		cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);

		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_io, vha, 0x305a,
		    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
		    vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}
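/*
 * Illustrative sketch (not part of the driver): free-slot accounting on a
 * circular request ring, as recomputed above from the hardware out-pointer.
 * If the producer index has not wrapped past the consumer, the free space
 * is the gap up to the consumer; otherwise it is the rest of the ring.
 * "example_" names are hypothetical.
 */
static inline uint32_t example_ring_free(uint32_t ring_index,	/* producer */
	uint32_t out,		/* consumer, read back from hardware */
	uint32_t length)	/* total entries in the ring */
{
	if (ring_index < out)
		return out - ring_index;
	return length - (ring_index - out);
}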
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}
/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1;	/* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_io, vha, 0x305b,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}
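/*
 * Illustrative sketch (not part of the driver): the wrap-around search for
 * a free completion handle used above. Handle 0 is reserved as the NULL
 * handle, so valid handles are 1..max and index slot (h - 1). Returns 0
 * when the table is full. "example_" names are hypothetical.
 */
static uint32_t example_next_handle(void **slots, uint32_t max, uint32_t cur)
{
	uint32_t h = cur;

	do {
		if (++h > max)
			h = 1;		/* skip reserved handle 0 */
		if (h == cur)
			return 0;	/* wrapped around: no free slot */
	} while (slots[h - 1] != NULL);

	return h;
}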
/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that none of cont_pkt64's 64-bit-specific fields
		 * are used for 32-bit addressing. Cast to (cont_entry_t *)
		 * for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
		    vha->vp_idx, cmd, se_cmd, se_cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;
		cmd->cmd_flags |= BIT_6;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag,
		    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_io, vha, 0x305d,
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		    se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}
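/*
 * Illustrative sketch (not part of the driver): how the residual count and
 * the SS_RESIDUAL_{UNDER,OVER} scsi_status bits relate to the initiator's
 * expected transfer length, as set up above. "example_" names are
 * hypothetical.
 */
static inline uint16_t example_residual_flags(uint32_t expected_len,
	uint32_t actual_len, uint32_t *residual)
{
	if (actual_len < expected_len) {
		*residual = expected_len - actual_len;	/* bytes not moved */
		return SS_RESIDUAL_UNDER;
	}
	if (actual_len > expected_len) {
		*residual = actual_len - expected_len;	/* extra bytes */
		return SS_RESIDUAL_OVER;
	}
	*residual = 0;
	return 0;
}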
static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}
#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 *  Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483646;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}
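/*
 * Illustrative sketch (not part of the driver): the Park-Miller "minimal
 * standard" generator used by qlt_srr_random() above. With the Schrage
 * decomposition m = 2^31 - 1 = 16807 * 127773 + 2836, the recurrence
 * seed' = 16807 * seed mod m can be computed without 32-bit overflow; the
 * code above adds m - 1 when the intermediate result is non-positive.
 * "example_" names are hypothetical.
 */
static inline uint32_t example_park_miller_next(uint32_t seed)
{
	int64_t hi = seed / 127773;
	int64_t lo = seed % 127773;
	int64_t rv = 16807 * lo - 2836 * hi;

	if (rv <= 0)
		rv += 2147483646;	/* m - 1, matching the code above */
	return (uint32_t)rv;
}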
static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This is not a real status packet lost, so it won't lead to SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here.
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", se_cmd->tag);
		}
	}
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if (prm->cmd->se_cmd.scsi_status != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
			    cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);

		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
			struct scsi_qla_host *vha = prm->cmd->vha;

			ql_dbg(ql_dbg_tgt, vha, 0xe04f,
			    "qla_target(%d): %d bytes of sense "
			    "lost", prm->tgt->ha->vp_idx,
			    prm->sense_buffer_len % 4);
		}
	} else {
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24: is that even possible? */
}
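/*
 * Illustrative sketch (not part of the driver): the sense buffer is copied
 * into the CTIO four bytes at a time with a CPU-to-big-endian swap per
 * word, as in the loop above; a trailing 1-3 bytes would be dropped, hence
 * the "bytes of sense lost" warning. "example_" names are hypothetical.
 */
static inline void example_copy_sense_words(uint32_t *dst, const uint8_t *src,
	uint32_t len)
{
	uint32_t i;

	for (i = 0; i < len / 4; i++)
		dst[i] = cpu_to_be32(((const uint32_t *)src)[i]);
	/* any len % 4 remainder is intentionally not copied */
}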
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	 * if (!sp->cmd->prot_chk)
	 *	return 0;
	 */
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 */
static inline void
qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
{
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;

	/*
	 * Wait until Mode Sense/Select (mode page 0Ah, subpage 2) has been
	 * implemented by TCM before the App tag is available; look for
	 * modesense_handlers[].
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit APP tag.
	 */
	case TARGET_DIF_TYPE1_PROT:
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case TARGET_DIF_TYPE2_PROT:
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case TARGET_DIF_TYPE3_PROT:
		ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
			ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
		break;
	}
}
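/*
 * Illustrative sketch (not part of the driver): the T10-PI length math used
 * by qlt_build_ctio_crc2_pkt() below. Every block carries an 8-byte
 * protection interval; whether the wire transfer includes it depends on
 * whether the HBA inserts/strips the PI or passes it through. "example_"
 * names are hypothetical.
 */
static inline uint32_t example_dif_bytes(uint32_t data_bytes, uint32_t blk_sz)
{
	return (data_bytes / blk_sz) * 8;	/* one 8-byte PI per block */
}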
2074 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2077 uint32_t transfer_length = 0;
2078 uint32_t data_bytes;
2080 uint8_t bundling = 1;
2082 struct crc_context *crc_ctx_pkt = NULL;
2083 struct qla_hw_data *ha;
2084 struct ctio_crc2_to_fw *pkt;
2085 dma_addr_t crc_ctx_dma;
2086 uint16_t fw_prot_opts = 0;
2087 struct qla_tgt_cmd *cmd = prm->cmd;
2088 struct se_cmd *se_cmd = &cmd->se_cmd;
2090 struct atio_from_isp *atio = &prm->cmd->atio;
2095 pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
2097 memset(pkt, 0, sizeof(*pkt));
2099 ql_dbg(ql_dbg_tgt, vha, 0xe071,
2100 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
2101 vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
2102 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
2104 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
2105 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
2108 /* Compute dif len and adjust data len to incude protection */
2109 data_bytes = cmd->bufflen;
2110 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
2112 switch (se_cmd->prot_op) {
2113 case TARGET_PROT_DIN_INSERT:
2114 case TARGET_PROT_DOUT_STRIP:
2115 transfer_length = data_bytes;
2116 data_bytes += dif_bytes;
2119 case TARGET_PROT_DIN_STRIP:
2120 case TARGET_PROT_DOUT_INSERT:
2121 case TARGET_PROT_DIN_PASS:
2122 case TARGET_PROT_DOUT_PASS:
2123 transfer_length = data_bytes + dif_bytes;
2131 if (!qlt_hba_err_chk_enabled(se_cmd))
2132 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
2133 /* HBA error checking enabled */
2134 else if (IS_PI_UNINIT_CAPABLE(ha)) {
2135 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2136 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2137 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
2138 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2139 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2142 switch (se_cmd->prot_op) {
2143 case TARGET_PROT_DIN_INSERT:
2144 case TARGET_PROT_DOUT_INSERT:
2145 fw_prot_opts |= PO_MODE_DIF_INSERT;
2147 case TARGET_PROT_DIN_STRIP:
2148 case TARGET_PROT_DOUT_STRIP:
2149 fw_prot_opts |= PO_MODE_DIF_REMOVE;
2151 case TARGET_PROT_DIN_PASS:
2152 case TARGET_PROT_DOUT_PASS:
2153 fw_prot_opts |= PO_MODE_DIF_PASS;
2154 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
2156 default:/* Normal Request */
2157 fw_prot_opts |= PO_MODE_DIF_PASS;
2163 /* Update entry type to indicate Command Type CRC_2 IOCB */
2164 pkt->entry_type = CTIO_CRC2;
2165 pkt->entry_count = 1;
2166 pkt->vp_index = vha->vp_idx;
2168 h = qlt_make_handle(vha);
2169 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2171 * CTIO type 7 from the firmware doesn't provide a way to
2172 * know the initiator's LOOP ID, hence we can't find
2173 * the session and, therefore, the command.
2177 ha->tgt.cmds[h-1] = prm->cmd;
2180 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
2181 pkt->nport_handle = prm->cmd->loop_id;
2182 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2183 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2184 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2185 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2186 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2188 /* silence compile warning */
2189 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2190 pkt->ox_id = cpu_to_le16(t16);
2192 t16 = (atio->u.isp24.attr << 9);
2193 pkt->flags |= cpu_to_le16(t16);
2194 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
2196 /* Set transfer direction */
2197 if (cmd->dma_data_direction == DMA_TO_DEVICE)
2198 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
2199 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2200 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
2203 pkt->dseg_count = prm->tot_dsds;
2204 /* Fibre channel byte count */
2205 pkt->transfer_length = cpu_to_le32(transfer_length);
2208 /* ----- CRC context -------- */
2210 /* Allocate CRC context from global pool */
2211 crc_ctx_pkt = cmd->ctx =
2212 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
2215 goto crc_queuing_error;
2217 /* Zero out CTX area. */
2218 clr_ptr = (uint8_t *)crc_ctx_pkt;
2219 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
2221 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
2222 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
2225 crc_ctx_pkt->handle = pkt->handle;
2227 qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
2229 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
2230 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
2231 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
2235 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
2238 * Configure bundling if the interleaved protection data
2239 * must be fetched via separate PCI accesses
2241 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
2242 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
2243 crc_ctx_pkt->u.bundling.dseg_count =
2244 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
2245 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
2248 /* Finish the common fields of CRC pkt */
2249 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
2250 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
2251 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
2252 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
2255 /* Walks data segments */
2256 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
2258 if (!bundling && prm->prot_seg_cnt) {
2259 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
2260 prm->tot_dsds, cmd))
2261 goto crc_queuing_error;
2262 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
2263 (prm->tot_dsds - prm->prot_seg_cnt), cmd))
2264 goto crc_queuing_error;
2266 if (bundling && prm->prot_seg_cnt) {
2267 /* Walks dif segments */
2268 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
2270 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
2271 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
2272 prm->prot_seg_cnt, cmd))
2273 goto crc_queuing_error;
2278 /* Cleanup will be performed by the caller */
2280 return QLA_FUNCTION_FAILED;
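/*
 * Illustrative sketch of the DIF byte accounting in
 * qlt_build_ctio_crc2_pkt() above (hypothetical helper; assumes one
 * 8-byte DIF tuple per logical block, as the dif_bytes computation
 * implies).  The Fibre Channel transfer length includes the protection
 * bytes only when PI actually travels on the wire (DIN_STRIP,
 * DOUT_INSERT, and the PASS ops); for DIN_INSERT and DOUT_STRIP the HBA
 * adds/removes PI on the PCI side instead.
 */
static inline uint32_t qlt_example_fc_transfer_len(uint32_t data_bytes,
	uint32_t blk_sz, bool pi_on_wire)
{
	uint32_t dif_bytes = (data_bytes / blk_sz) * 8;

	return pi_on_wire ? data_bytes + dif_bytes : data_bytes;
}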
2285 * Callback to set up a response for xmit_type QLA_TGT_XMIT_DATA and/or
2286 * QLA_TGT_XMIT_STATUS on >= 24xx silicon
2288 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2289 uint8_t scsi_status)
2291 struct scsi_qla_host *vha = cmd->vha;
2292 struct qla_hw_data *ha = vha->hw;
2293 struct ctio7_to_24xx *pkt;
2294 struct qla_tgt_prm prm;
2295 uint32_t full_req_cnt = 0;
2296 unsigned long flags = 0;
2299 memset(&prm, 0, sizeof(prm));
2300 qlt_check_srr_debug(cmd, &xmit_type);
2302 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
2303 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
2304 (xmit_type & QLA_TGT_XMIT_STATUS) ?
2305 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
2308 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2310 if (unlikely(res != 0)) {
2311 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
2317 spin_lock_irqsave(&ha->hardware_lock, flags);
2319 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
2321 * Either a chip reset is active or this request was from
2322 * previous life, just abort the processing.
2324 cmd->state = QLA_TGT_STATE_PROCESSED;
2325 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2326 ql_dbg(ql_dbg_async, vha, 0xe101,
2327 "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
2328 qla2x00_reset_active(vha), cmd->reset_count,
2330 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2334 /* Does F/W have IOCBs for this request? */
2335 res = qlt_check_reserve_free_req(vha, full_req_cnt);
2337 goto out_unmap_unlock;
2339 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
2340 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2342 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2343 if (unlikely(res != 0))
2344 goto out_unmap_unlock;
2347 pkt = (struct ctio7_to_24xx *)prm.pkt;
2349 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
2350 pkt->u.status0.flags |=
2351 cpu_to_le16(CTIO7_FLAGS_DATA_IN |
2352 CTIO7_FLAGS_STATUS_MODE_0);
2354 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2355 qlt_load_data_segments(&prm, vha);
2357 if (prm.add_status_pkt == 0) {
2358 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2359 pkt->u.status0.scsi_status =
2360 cpu_to_le16(prm.rq_result);
2361 pkt->u.status0.residual =
2362 cpu_to_le32(prm.residual);
2363 pkt->u.status0.flags |= cpu_to_le16(
2364 CTIO7_FLAGS_SEND_STATUS);
2365 if (qlt_need_explicit_conf(ha, cmd, 0)) {
2366 pkt->u.status0.flags |=
2368 CTIO7_FLAGS_EXPLICIT_CONFORM |
2369 CTIO7_FLAGS_CONFORM_REQ);
2375 * We have already made sure that there is a sufficient
2376 * number of request entries to not drop the HW lock in
2377 * qlt_get_req_pkt() below.
2379 struct ctio7_to_24xx *ctio =
2380 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
2382 ql_dbg(ql_dbg_io, vha, 0x305e,
2383 "Building additional status packet 0x%p.\n",
2387 * T10Dif: ctio_crc2_to_fw overlay on top of
2388 * ctio7_to_24xx
2390 memcpy(ctio, pkt, sizeof(*ctio));
2391 /* reset back to CTIO7 */
2392 ctio->entry_count = 1;
2393 ctio->entry_type = CTIO_TYPE7;
2394 ctio->dseg_count = 0;
2395 ctio->u.status1.flags &= ~cpu_to_le16(
2396 CTIO7_FLAGS_DATA_IN);
2398 /* Real finish is ctio_m1's finish */
2399 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2400 pkt->u.status0.flags |= cpu_to_le16(
2401 CTIO7_FLAGS_DONT_RET_CTIO);
2403 /* qlt_24xx_init_ctio_to_isp will correct
2404 * all necessary fields that are part of CTIO7.
2405 * There should be no residual of CTIO-CRC2 data.
2407 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
2409 pr_debug("Status CTIO7: %p\n", ctio);
2412 qlt_24xx_init_ctio_to_isp(pkt, &prm);
2415 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
2416 cmd->cmd_sent_to_fw = 1;
2418 /* Memory Barrier */
2420 qla2x00_start_iocbs(vha, vha->req);
2421 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2426 qlt_unmap_sg(vha, cmd);
2427 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2431 EXPORT_SYMBOL(qlt_xmit_response);
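/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): a fabric module such as tcm_qla2xxx typically completes a READ
 * with data and status in one call, and other commands with status only.
 */
static inline int qlt_example_queue_data_in(struct qla_tgt_cmd *cmd)
{
	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA | QLA_TGT_XMIT_STATUS,
	    cmd->se_cmd.scsi_status);
}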
2433 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2435 struct ctio7_to_24xx *pkt;
2436 struct scsi_qla_host *vha = cmd->vha;
2437 struct qla_hw_data *ha = vha->hw;
2438 struct qla_tgt *tgt = cmd->tgt;
2439 struct qla_tgt_prm prm;
2440 unsigned long flags;
2443 memset(&prm, 0, sizeof(prm));
2449 /* Send marker if required */
2450 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2453 /* Calculate number of entries and segments required */
2454 if (qlt_pci_map_calc_cnt(&prm) != 0)
2457 spin_lock_irqsave(&ha->hardware_lock, flags);
2459 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
2461 * Either a chip reset is active or this request was from
2462 * previous life, just abort the processing.
2464 cmd->state = QLA_TGT_STATE_NEED_DATA;
2465 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2466 ql_dbg(ql_dbg_async, vha, 0xe102,
2467 "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
2468 qla2x00_reset_active(vha), cmd->reset_count,
2470 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2474 /* Does F/W have IOCBs for this request? */
2475 res = qlt_check_reserve_free_req(vha, prm.req_cnt);
2477 goto out_unlock_free_unmap;
2478 if (cmd->se_cmd.prot_op)
2479 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2481 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2483 if (unlikely(res != 0))
2484 goto out_unlock_free_unmap;
2485 pkt = (struct ctio7_to_24xx *)prm.pkt;
2486 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2487 CTIO7_FLAGS_STATUS_MODE_0);
2489 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2490 qlt_load_data_segments(&prm, vha);
2492 cmd->state = QLA_TGT_STATE_NEED_DATA;
2493 cmd->cmd_sent_to_fw = 1;
2495 /* Memory Barrier */
2497 qla2x00_start_iocbs(vha, vha->req);
2498 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2502 out_unlock_free_unmap:
2503 qlt_unmap_sg(vha, cmd);
2504 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2508 EXPORT_SYMBOL(qlt_rdy_to_xfer);
2512 * Checks the guard or metadata for the type of error
2513 * detected by the HBA.
2516 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
2517 struct ctio_crc_from_fw *sts)
2519 uint8_t *ap = &sts->actual_dif[0];
2520 uint8_t *ep = &sts->expected_dif[0];
2521 uint32_t e_ref_tag, a_ref_tag;
2522 uint16_t e_app_tag, a_app_tag;
2523 uint16_t e_guard, a_guard;
2524 uint64_t lba = cmd->se_cmd.t_task_lba;
2526 a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
2527 a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
2528 a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
2530 e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
2531 e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
2532 e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
2534 ql_dbg(ql_dbg_tgt, vha, 0xe075,
2535 "iocb(s) %p Returned STATUS.\n", sts);
2537 ql_dbg(ql_dbg_tgt, vha, 0xf075,
2538 "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
2539 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2540 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
2544 * For type 3: ref & app tags are all 'f's
2545 * For type 0,1,2: app tag is all 'f's
2547 if ((a_app_tag == 0xffff) &&
2548 ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
2549 (a_ref_tag == 0xffffffff))) {
2550 uint32_t blocks_done;
2552 /* 2TB boundary case covered automatically with this */
2553 blocks_done = e_ref_tag - (uint32_t)lba + 1;
2554 cmd->se_cmd.bad_sector = e_ref_tag;
2555 cmd->se_cmd.pi_err = 0;
2556 ql_dbg(ql_dbg_tgt, vha, 0xf074,
2557 "need to return scsi good\n");
2559 /* Update protection tag */
2560 if (cmd->prot_sg_cnt) {
2561 uint32_t i, k = 0, num_ent;
2562 struct scatterlist *sg, *sgl;
2567 /* Patch the corresponding protection tags */
2568 for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
2569 num_ent = sg_dma_len(sg) / 8;
2570 if (k + num_ent < blocks_done) {
2578 if (k != blocks_done) {
2579 ql_log(ql_log_warn, vha, 0xf076,
2580 "unexpected tag values tag:lba=%u:%llu)\n",
2581 e_ref_tag, (unsigned long long)lba);
2586 struct sd_dif_tuple *spt;
2588 * This section came from the initiator. Is it valid here?
2589 * Should the ULP be overridden with the actual value?
2591 spt = page_address(sg_page(sg)) + sg->offset;
2594 spt->app_tag = 0xffff;
2595 if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
2596 spt->ref_tag = 0xffffffff;
2604 if (e_guard != a_guard) {
2605 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
2606 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2608 ql_log(ql_log_warn, vha, 0xe076,
2609 "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2610 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2611 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2612 a_guard, e_guard, cmd);
2617 if (e_ref_tag != a_ref_tag) {
2618 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
2619 cmd->se_cmd.bad_sector = e_ref_tag;
2621 ql_log(ql_log_warn, vha, 0xe077,
2622 "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2623 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2624 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2625 a_guard, e_guard, cmd);
2629 /* check appl tag */
2630 if (e_app_tag != a_app_tag) {
2631 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
2632 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2634 ql_log(ql_log_warn, vha, 0xe078,
2635 "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2636 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2637 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2638 a_guard, e_guard, cmd);
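/*
 * Illustrative sketch of the "blocks done" arithmetic used in
 * qlt_handle_dif_error() above (hypothetical helper).  Assumption: for
 * DIF types 1/2 the 32-bit REF tag increments by one per block, so plain
 * unsigned subtraction also covers wrap past the 2TB boundary.
 */
static inline uint32_t qlt_example_blocks_done(uint32_t e_ref_tag,
	uint64_t t_task_lba)
{
	return e_ref_tag - (uint32_t)t_task_lba + 1;
}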
2646 /* If hardware_lock held on entry, might drop it, then reacquire */
2647 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2648 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2649 struct qla_tgt_cmd *cmd,
2650 struct atio_from_isp *atio)
2652 struct ctio7_to_24xx *ctio24;
2653 struct qla_hw_data *ha = vha->hw;
2658 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
2660 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2662 ql_dbg(ql_dbg_tgt, vha, 0xe050,
2663 "qla_target(%d): %s failed: unable to allocate "
2664 "request packet\n", vha->vp_idx, __func__);
2669 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
2670 ql_dbg(ql_dbg_tgt, vha, 0xe051,
2671 "qla_target(%d): Terminating cmd %p with "
2672 "incorrect state %d\n", vha->vp_idx, cmd,
2678 pkt->entry_count = 1;
2679 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2681 ctio24 = (struct ctio7_to_24xx *)pkt;
2682 ctio24->entry_type = CTIO_TYPE7;
2683 ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
2684 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2685 ctio24->vp_index = vha->vp_idx;
2686 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2687 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2688 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2689 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
2690 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
2691 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
2692 CTIO7_FLAGS_TERMINATE);
2693 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2694 ctio24->u.status1.ox_id = cpu_to_le16(temp);
2696 /* Most likely, it isn't needed */
2697 ctio24->u.status1.residual = get_unaligned((uint32_t *)
2698 &atio->u.isp24.fcp_cmnd.add_cdb[
2699 atio->u.isp24.fcp_cmnd.add_cdb_len]);
2700 if (ctio24->u.status1.residual != 0)
2701 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
2703 /* Memory Barrier */
2705 qla2x00_start_iocbs(vha, vha->req);
2709 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2710 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2712 unsigned long flags;
2715 if (qlt_issue_marker(vha, ha_locked) < 0)
2719 rc = __qlt_send_term_exchange(vha, cmd, atio);
2721 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2724 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2725 rc = __qlt_send_term_exchange(vha, cmd, atio);
2727 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2728 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2731 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
2732 !cmd->cmd_sent_to_fw)) {
2733 if (!ha_locked && !in_interrupt())
2734 msleep(250); /* just in case */
2736 qlt_unmap_sg(vha, cmd);
2737 vha->hw->tgt.tgt_ops->free_cmd(cmd);
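/*
 * Illustrative usage sketch for the ha_locked contract above
 * (hypothetical caller): process context that does not hold
 * hardware_lock passes ha_locked=0 so the lock is taken internally;
 * IRQ/response-path code that already holds it passes ha_locked=1.
 */
static inline void qlt_example_term_from_process_ctx(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	qlt_send_term_exchange(vha, cmd, &cmd->atio, 0 /* !ha_locked */);
}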
2742 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
2744 struct list_head free_list;
2745 struct qla_tgt_cmd *cmd, *tcmd;
2747 vha->hw->tgt.leak_exchg_thresh_hold =
2748 (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
2751 if (!list_empty(&vha->hw->tgt.q_full_list)) {
2752 INIT_LIST_HEAD(&free_list);
2753 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
2755 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
2756 list_del(&cmd->cmd_list);
2757 /* This cmd was never sent to TCM. There is no need
2758 * to schedule free or call free_cmd
2761 vha->hw->tgt.num_qfull_cmds_alloc--;
2764 vha->hw->tgt.num_qfull_cmds_dropped = 0;
2767 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
2769 uint32_t total_leaked;
2771 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
2773 if (vha->hw->tgt.leak_exchg_thresh_hold &&
2774 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
2776 ql_dbg(ql_dbg_tgt, vha, 0xe079,
2777 "Chip reset due to exchange starvation: %d/%d.\n",
2778 total_leaked, vha->hw->fw_xcb_count);
2780 if (IS_P3P_TYPE(vha->hw))
2781 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2783 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2784 qla2xxx_wake_dpc(vha);
2789 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2791 struct qla_tgt_sess *sess = cmd->sess;
2793 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
2794 "%s: se_cmd[%p] ox_id %04x\n",
2795 __func__, &cmd->se_cmd,
2796 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
2798 BUG_ON(cmd->cmd_in_wq);
2801 qlt_decr_num_pend_cmds(cmd->vha);
2803 BUG_ON(cmd->sg_mapped);
2804 cmd->jiffies_at_free = get_jiffies_64();
2805 if (unlikely(cmd->free_sg))
2808 if (!sess || !sess->se_sess) {
2812 cmd->jiffies_at_free = get_jiffies_64();
2813 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
2815 EXPORT_SYMBOL(qlt_free_cmd);
2817 /* ha->hardware_lock supposed to be held on entry */
2818 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
2819 struct qla_tgt_cmd *cmd, void *ctio)
2821 struct qla_tgt_srr_ctio *sc;
2822 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2823 struct qla_tgt_srr_imm *imm;
2826 cmd->cmd_flags |= BIT_15;
2828 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
2829 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
2832 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
2833 "qla_target(%d): SRR CTIO, but ctio is NULL\n",
2838 sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
2841 /* IRQ is already OFF */
2842 spin_lock(&tgt->srr_lock);
2843 sc->srr_id = tgt->ctio_srr_id;
2844 list_add_tail(&sc->srr_list_entry,
2845 &tgt->srr_ctio_list);
2846 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
2847 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
2848 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
2850 list_for_each_entry(imm, &tgt->srr_imm_list,
2852 if (imm->srr_id == sc->srr_id) {
2858 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
2859 "Scheduling srr work\n");
2860 schedule_work(&tgt->srr_work);
2862 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
2863 "qla_target(%d): imm_srr_id "
2864 "== ctio_srr_id (%d), but there is no "
2865 "corresponding SRR IMM, deleting CTIO "
2866 "SRR %p\n", vha->vp_idx,
2867 tgt->ctio_srr_id, sc);
2868 list_del(&sc->srr_list_entry);
2869 spin_unlock(&tgt->srr_lock);
2875 spin_unlock(&tgt->srr_lock);
2877 struct qla_tgt_srr_imm *ti;
2879 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
2880 "qla_target(%d): Unable to allocate SRR CTIO entry\n",
2882 spin_lock(&tgt->srr_lock);
2883 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
2885 if (imm->srr_id == tgt->ctio_srr_id) {
2886 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
2887 "IMM SRR %p deleted (id %d)\n",
2889 list_del(&imm->srr_list_entry);
2890 qlt_reject_free_srr_imm(vha, imm, 1);
2893 spin_unlock(&tgt->srr_lock);
2902 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2904 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
2905 struct qla_tgt_cmd *cmd, uint32_t status)
2910 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
2912 cpu_to_le16(OF_TERM_EXCH));
2917 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2922 /* ha->hardware_lock supposed to be held on entry */
2923 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
2926 struct qla_hw_data *ha = vha->hw;
2929 if (ha->tgt.cmds[handle] != NULL) {
2930 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
2931 ha->tgt.cmds[handle] = NULL;
2937 /* ha->hardware_lock supposed to be held on entry */
2938 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
2939 uint32_t handle, void *ctio)
2941 struct qla_tgt_cmd *cmd = NULL;
2943 /* Clear out internal marks */
2944 handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
2945 CTIO_INTERMEDIATE_HANDLE_MARK);
2947 if (handle != QLA_TGT_NULL_HANDLE) {
2948 if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
2951 /* handle-1 is actually used */
2952 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
2953 ql_dbg(ql_dbg_tgt, vha, 0xe052,
2954 "qla_target(%d): Wrong handle %x received\n",
2955 vha->vp_idx, handle);
2958 cmd = qlt_get_cmd(vha, handle);
2959 if (unlikely(cmd == NULL)) {
2960 ql_dbg(ql_dbg_tgt, vha, 0xe053,
2961 "qla_target(%d): Suspicious: unable to "
2962 "find the command with handle %x\n", vha->vp_idx,
2966 } else if (ctio != NULL) {
2967 /* We can't get loop ID from CTIO7 */
2968 ql_dbg(ql_dbg_tgt, vha, 0xe054,
2969 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
2970 "support NULL handles\n", vha->vp_idx);
2977 /* hardware_lock should be held by caller. */
2979 qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2981 struct qla_hw_data *ha = vha->hw;
2985 qlt_unmap_sg(vha, cmd);
2987 handle = qlt_make_handle(vha);
2989 /* TODO: fix debug message type and ids. */
2990 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
2991 ql_dbg(ql_dbg_io, vha, 0xff00,
2992 "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
2993 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
2994 cmd->write_data_transferred = 0;
2995 cmd->state = QLA_TGT_STATE_DATA_IN;
2997 ql_dbg(ql_dbg_io, vha, 0xff01,
2998 "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
3000 ha->tgt.tgt_ops->handle_data(cmd);
3002 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3003 ql_dbg(ql_dbg_io, vha, 0xff02,
3004 "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
3006 ql_dbg(ql_dbg_io, vha, 0xff03,
3007 "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
3012 cmd->cmd_flags |= BIT_12;
3013 ha->tgt.tgt_ops->free_cmd(cmd);
3017 qlt_host_reset_handler(struct qla_hw_data *ha)
3019 struct qla_tgt_cmd *cmd;
3020 unsigned long flags;
3021 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3022 scsi_qla_host_t *vha = NULL;
3023 struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
3026 if (!base_vha->hw->tgt.tgt_ops)
3029 if (!tgt || qla_ini_mode_enabled(base_vha)) {
3030 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
3031 "Target mode disabled\n");
3035 ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
3036 "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
3037 base_vha->dpc_flags);
3039 spin_lock_irqsave(&ha->hardware_lock, flags);
3040 for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
3041 cmd = qlt_get_cmd(base_vha, i);
3044 /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
3046 qlt_abort_cmd_on_host_reset(vha, cmd);
3048 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3053 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3055 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3056 uint32_t status, void *ctio)
3058 struct qla_hw_data *ha = vha->hw;
3059 struct se_cmd *se_cmd;
3060 struct qla_tgt_cmd *cmd;
3062 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
3063 /* That could happen only in case of an error/reset/abort */
3064 if (status != CTIO_SUCCESS) {
3065 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3066 "Intermediate CTIO received"
3067 " (status %x)\n", status);
3072 cmd = qlt_ctio_to_cmd(vha, handle, ctio);
3076 se_cmd = &cmd->se_cmd;
3077 cmd->cmd_sent_to_fw = 0;
3079 qlt_unmap_sg(vha, cmd);
3081 if (unlikely(status != CTIO_SUCCESS)) {
3082 switch (status & 0xFFFF) {
3083 case CTIO_LIP_RESET:
3084 case CTIO_TARGET_RESET:
3086 /* driver request abort via Terminate exchange */
3088 case CTIO_INVALID_RX_ID:
3090 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3091 "qla_target(%d): CTIO with "
3092 "status %#x received, state %x, se_cmd %p, "
3093 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3094 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3095 status, cmd->state, se_cmd);
3098 case CTIO_PORT_LOGGED_OUT:
3099 case CTIO_PORT_UNAVAILABLE:
3100 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3101 "qla_target(%d): CTIO with PORT LOGGED "
3102 "OUT (29) or PORT UNAVAILABLE (28) status %x "
3103 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3104 status, cmd->state, se_cmd);
3107 case CTIO_SRR_RECEIVED:
3108 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
3109 "qla_target(%d): CTIO with SRR_RECEIVED"
3110 " status %x received (state %x, se_cmd %p)\n",
3111 vha->vp_idx, status, cmd->state, se_cmd);
3112 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
3117 case CTIO_DIF_ERROR: {
3118 struct ctio_crc_from_fw *crc =
3119 (struct ctio_crc_from_fw *)ctio;
3120 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3121 "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
3122 vha->vp_idx, status, cmd->state, se_cmd,
3123 *((u64 *)&crc->actual_dif[0]),
3124 *((u64 *)&crc->expected_dif[0]));
3126 if (qlt_handle_dif_error(vha, cmd, ctio)) {
3127 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3128 /* scsi Write/xfer rdy complete */
3131 /* scsi read/xmit respond complete
3132 * call handle dif to send scsi status
3133 * rather than terminate exchange.
3135 cmd->state = QLA_TGT_STATE_PROCESSED;
3136 ha->tgt.tgt_ops->handle_dif_err(cmd);
3140 /* Need to generate a SCSI good completion,
3141 * because FW did not send scsi status.
3149 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
3150 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
3151 vha->vp_idx, status, cmd->state, se_cmd);
3156 /* "cmd->state == QLA_TGT_STATE_ABORTED" means
3157 * cmd is already aborted/terminated, we don't
3158 * need to terminate again. The exchange is already
3159 * cleaned up/freed at FW level. Just cleanup at driver
3162 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
3163 (cmd->state != QLA_TGT_STATE_ABORTED)) {
3164 cmd->cmd_flags |= BIT_13;
3165 if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
3171 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3173 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3174 cmd->state = QLA_TGT_STATE_DATA_IN;
3176 if (status == CTIO_SUCCESS)
3177 cmd->write_data_transferred = 1;
3179 ha->tgt.tgt_ops->handle_data(cmd);
3181 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3182 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3183 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
3185 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3186 "qla_target(%d): A command in state (%d) should "
3187 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
3190 if (unlikely(status != CTIO_SUCCESS) &&
3191 (cmd->state != QLA_TGT_STATE_ABORTED)) {
3192 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
3197 ha->tgt.tgt_ops->free_cmd(cmd);
3200 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
3205 switch (task_codes) {
3206 case ATIO_SIMPLE_QUEUE:
3207 fcp_task_attr = TCM_SIMPLE_TAG;
3209 case ATIO_HEAD_OF_QUEUE:
3210 fcp_task_attr = TCM_HEAD_TAG;
3212 case ATIO_ORDERED_QUEUE:
3213 fcp_task_attr = TCM_ORDERED_TAG;
3215 case ATIO_ACA_QUEUE:
3216 fcp_task_attr = TCM_ACA_TAG;
3219 fcp_task_attr = TCM_SIMPLE_TAG;
3222 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
3223 "qla_target: unknown task code %x, use ORDERED instead\n",
3225 fcp_task_attr = TCM_ORDERED_TAG;
3229 return fcp_task_attr;
3232 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
3235 * Process context for I/O path into tcm_qla2xxx code
3237 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3239 scsi_qla_host_t *vha = cmd->vha;
3240 struct qla_hw_data *ha = vha->hw;
3241 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3242 struct qla_tgt_sess *sess = cmd->sess;
3243 struct atio_from_isp *atio = &cmd->atio;
3245 unsigned long flags;
3246 uint32_t data_length;
3247 int ret, fcp_task_attr, data_dir, bidi = 0;
3250 cmd->cmd_flags |= BIT_1;
3254 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3255 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3256 cmd->unpacked_lun = scsilun_to_int(
3257 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
3259 if (atio->u.isp24.fcp_cmnd.rddata &&
3260 atio->u.isp24.fcp_cmnd.wrdata) {
3262 data_dir = DMA_TO_DEVICE;
3263 } else if (atio->u.isp24.fcp_cmnd.rddata)
3264 data_dir = DMA_FROM_DEVICE;
3265 else if (atio->u.isp24.fcp_cmnd.wrdata)
3266 data_dir = DMA_TO_DEVICE;
3268 data_dir = DMA_NONE;
3270 fcp_task_attr = qlt_get_fcp_task_attr(vha,
3271 atio->u.isp24.fcp_cmnd.task_attr);
3272 data_length = be32_to_cpu(get_unaligned((uint32_t *)
3273 &atio->u.isp24.fcp_cmnd.add_cdb[
3274 atio->u.isp24.fcp_cmnd.add_cdb_len]));
3276 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
3277 fcp_task_attr, data_dir, bidi);
3281 * Drop the extra session reference taken in qlt_handle_cmd_for_atio().
3283 spin_lock_irqsave(&ha->hardware_lock, flags);
3284 ha->tgt.tgt_ops->put_sess(sess);
3285 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3289 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
3291 * cmd has not been sent to the target yet, so pass NULL as the second
3292 * argument to qlt_send_term_exchange() and free the memory here.
3294 cmd->cmd_flags |= BIT_2;
3295 spin_lock_irqsave(&ha->hardware_lock, flags);
3296 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
3298 qlt_decr_num_pend_cmds(vha);
3299 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3300 ha->tgt.tgt_ops->put_sess(sess);
3301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
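/*
 * Illustrative sketch of the FCP_CMND burst-length extraction done in
 * __qlt_do_work() above (hypothetical helper): per the indexing used
 * there, a 4-byte big-endian data length immediately follows the
 * additional CDB bytes.
 */
static inline uint32_t qlt_example_fcp_data_len(struct atio_from_isp *atio)
{
	return be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
		atio->u.isp24.fcp_cmnd.add_cdb_len]));
}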
3304 static void qlt_do_work(struct work_struct *work)
3306 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3311 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3312 struct qla_tgt_sess *sess,
3313 struct atio_from_isp *atio)
3315 struct se_session *se_sess = sess->se_sess;
3316 struct qla_tgt_cmd *cmd;
3319 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
3323 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
3324 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
3326 memcpy(&cmd->atio, atio, sizeof(*atio));
3327 cmd->state = QLA_TGT_STATE_NEW;
3328 cmd->tgt = vha->vha_tgt.qla_tgt;
3329 qlt_incr_num_pend_cmds(vha);
3331 cmd->se_cmd.map_tag = tag;
3333 cmd->loop_id = sess->loop_id;
3334 cmd->conf_compl_supported = sess->conf_compl_supported;
3339 static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
3342 static void qlt_create_sess_from_atio(struct work_struct *work)
3344 struct qla_tgt_sess_op *op = container_of(work,
3345 struct qla_tgt_sess_op, work);
3346 scsi_qla_host_t *vha = op->vha;
3347 struct qla_hw_data *ha = vha->hw;
3348 struct qla_tgt_sess *sess;
3349 struct qla_tgt_cmd *cmd;
3350 unsigned long flags;
3351 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3353 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3354 "qla_target(%d): Unable to find wwn login"
3355 " (s_id %x:%x:%x), trying to create it manually\n",
3356 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3358 if (op->atio.u.raw.entry_count > 1) {
3359 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3360 "Dropping multy entry atio %p\n", &op->atio);
3364 mutex_lock(&vha->vha_tgt.tgt_mutex);
3365 sess = qlt_make_local_sess(vha, s_id);
3366 /* sess has an extra creation ref. */
3367 mutex_unlock(&vha->vha_tgt.tgt_mutex);
3372 * Now obtain a pre-allocated session tag using the original op->atio
3373 * packet header, and dispatch into __qlt_do_work() using the existing
3376 cmd = qlt_get_tag(vha, sess, &op->atio);
3378 spin_lock_irqsave(&ha->hardware_lock, flags);
3379 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
3380 ha->tgt.tgt_ops->put_sess(sess);
3381 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3386 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
3387 * the extra reference taken above by qlt_make_local_sess()
3394 spin_lock_irqsave(&ha->hardware_lock, flags);
3395 qlt_send_term_exchange(vha, NULL, &op->atio, 1);
3396 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3401 /* ha->hardware_lock supposed to be held on entry */
3402 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3403 struct atio_from_isp *atio)
3405 struct qla_hw_data *ha = vha->hw;
3406 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3407 struct qla_tgt_sess *sess;
3408 struct qla_tgt_cmd *cmd;
3410 if (unlikely(tgt->tgt_stop)) {
3411 ql_dbg(ql_dbg_io, vha, 0x3061,
3412 "New command while device %p is shutting down\n", tgt);
3416 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
3417 if (unlikely(!sess)) {
3418 struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
3423 memcpy(&op->atio, atio, sizeof(*atio));
3425 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3426 queue_work(qla_tgt_wq, &op->work);
3430 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
3432 kref_get(&sess->se_sess->sess_kref);
3434 cmd = qlt_get_tag(vha, sess, atio);
3436 ql_dbg(ql_dbg_io, vha, 0x3062,
3437 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
3438 ha->tgt.tgt_ops->put_sess(sess);
3443 cmd->jiffies_at_alloc = get_jiffies_64();
3445 cmd->reset_count = vha->hw->chip_reset;
3448 cmd->cmd_flags |= BIT_0;
3449 INIT_WORK(&cmd->work, qlt_do_work);
3450 queue_work(qla_tgt_wq, &cmd->work);
3455 /* ha->hardware_lock supposed to be held on entry */
3456 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3457 int fn, void *iocb, int flags)
3459 struct scsi_qla_host *vha = sess->vha;
3460 struct qla_hw_data *ha = vha->hw;
3461 struct qla_tgt_mgmt_cmd *mcmd;
3465 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
3467 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
3468 "qla_target(%d): Allocation of management "
3469 "command failed, some commands and their data could "
3470 "leak\n", vha->vp_idx);
3473 memset(mcmd, 0, sizeof(*mcmd));
3477 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
3478 sizeof(mcmd->orig_iocb.imm_ntfy));
3480 mcmd->tmr_func = fn;
3481 mcmd->flags = flags;
3482 mcmd->reset_count = vha->hw->chip_reset;
3485 case QLA_TGT_CLEAR_ACA:
3486 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
3487 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
3488 tmr_func = TMR_CLEAR_ACA;
3491 case QLA_TGT_TARGET_RESET:
3492 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
3493 "qla_target(%d): TARGET_RESET received\n",
3495 tmr_func = TMR_TARGET_WARM_RESET;
3498 case QLA_TGT_LUN_RESET:
3499 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
3500 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
3501 tmr_func = TMR_LUN_RESET;
3504 case QLA_TGT_CLEAR_TS:
3505 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
3506 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
3507 tmr_func = TMR_CLEAR_TASK_SET;
3510 case QLA_TGT_ABORT_TS:
3511 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
3512 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
3513 tmr_func = TMR_ABORT_TASK_SET;
3516 case QLA_TGT_ABORT_ALL:
3517 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
3518 "qla_target(%d): Doing ABORT_ALL_TASKS\n",
3523 case QLA_TGT_ABORT_ALL_SESS:
3524 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
3525 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
3530 case QLA_TGT_NEXUS_LOSS_SESS:
3531 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
3532 "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
3537 case QLA_TGT_NEXUS_LOSS:
3538 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
3539 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
3544 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
3545 "qla_target(%d): Unknown task mgmt fn 0x%x\n",
3546 sess->vha->vp_idx, fn);
3547 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3551 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
3553 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
3554 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
3555 sess->vha->vp_idx, res);
3556 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3563 /* ha->hardware_lock supposed to be held on entry */
3564 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
3566 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3567 struct qla_hw_data *ha = vha->hw;
3568 struct qla_tgt *tgt;
3569 struct qla_tgt_sess *sess;
3570 uint32_t lun, unpacked_lun;
3573 tgt = vha->vha_tgt.qla_tgt;
3575 lun = a->u.isp24.fcp_cmnd.lun;
3576 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
3577 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
3578 a->u.isp24.fcp_hdr.s_id);
3579 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
3582 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
3583 "qla_target(%d): task mgmt fn 0x%x for "
3584 "non-existant session\n", vha->vp_idx, fn);
3585 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
3586 sizeof(struct atio_from_isp));
3589 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
3592 /* ha->hardware_lock supposed to be held on entry */
3593 static int __qlt_abort_task(struct scsi_qla_host *vha,
3594 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
3596 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3597 struct qla_hw_data *ha = vha->hw;
3598 struct qla_tgt_mgmt_cmd *mcmd;
3599 uint32_t lun, unpacked_lun;
3602 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
3604 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
3605 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
3606 vha->vp_idx, __func__);
3609 memset(mcmd, 0, sizeof(*mcmd));
3612 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
3613 sizeof(mcmd->orig_iocb.imm_ntfy));
3615 lun = a->u.isp24.fcp_cmnd.lun;
3616 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
3617 mcmd->reset_count = vha->hw->chip_reset;
3619 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
3620 le16_to_cpu(iocb->u.isp2x.seq_id));
3622 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
3623 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
3625 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3632 /* ha->hardware_lock supposed to be held on entry */
3633 static int qlt_abort_task(struct scsi_qla_host *vha,
3634 struct imm_ntfy_from_isp *iocb)
3636 struct qla_hw_data *ha = vha->hw;
3637 struct qla_tgt_sess *sess;
3640 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
3642 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
3644 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
3645 "qla_target(%d): task abort for unexisting "
3646 "session\n", vha->vp_idx);
3647 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
3648 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
3651 return __qlt_abort_task(vha, iocb, sess);
3655 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3657 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3658 struct imm_ntfy_from_isp *iocb)
3662 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
3663 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
3664 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
3666 switch (iocb->u.isp24.status_subcode) {
3672 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
3677 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3678 if (tgt->link_reinit_iocb_pending) {
3679 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
3681 tgt->link_reinit_iocb_pending = 0;
3683 res = 1; /* send notify ack */
3688 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
3689 "qla_target(%d): Unsupported ELS command %x "
3690 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
3691 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
3698 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
3702 * FIXME: Reject non zero SRR relative offset until we can test
3703 * this code properly.
3705 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
3708 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
3709 size_t first_offset = 0, rem_offset = offset, tmp = 0;
3710 int i, sg_srr_cnt, bufflen = 0;
3712 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
3713 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
3714 "cmd->sg_cnt: %u, direction: %d\n",
3715 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
3717 if (!cmd->sg || !cmd->sg_cnt) {
3718 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
3719 "Missing cmd->sg or zero cmd->sg_cnt in"
3720 " qla_tgt_set_data_offset\n");
3724 * Walk the current cmd->sg list until we locate the new sg_srr_start
3726 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
3727 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
3728 "sg[%d]: %p page: %p, length: %d, offset: %d\n",
3729 i, sg, sg_page(sg), sg->length, sg->offset);
3731 if ((sg->length + tmp) > offset) {
3732 first_offset = rem_offset;
3734 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
3735 "Found matching sg[%d], using %p as sg_srr_start, "
3736 "and using first_offset: %zu\n", i, sg,
3741 rem_offset -= sg->length;
3744 if (!sg_srr_start) {
3745 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
3746 "Unable to locate sg_srr_start for offset: %u\n", offset);
3749 sg_srr_cnt = (cmd->sg_cnt - i);
3751 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
3753 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
3754 "Unable to allocate sgp\n");
3757 sg_init_table(sg_srr, sg_srr_cnt);
3760 * Walk the remaining list for sg_srr_start, mapping to the newly
3761 * allocated sg_srr taking first_offset into account.
3763 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
3765 sg_set_page(sgp, sg_page(sg),
3766 (sg->length - first_offset), first_offset);
3769 sg_set_page(sgp, sg_page(sg), sg->length, 0);
3771 bufflen += sgp->length;
3779 cmd->sg_cnt = sg_srr_cnt;
3780 cmd->bufflen = bufflen;
3781 cmd->offset += offset;
3784 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
3785 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
3787 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
3789 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
3792 if (cmd->sg_cnt < 0)
3795 if (cmd->bufflen < 0)
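/*
 * Illustrative sketch of the SG-list walk performed in
 * qlt_set_data_offset() above (hypothetical helper): locate the
 * scatterlist element that contains byte 'offset' and report the
 * remaining offset within that element.
 */
static inline struct scatterlist *qlt_example_sg_at_offset(
	struct scatterlist *sgl, int nents, uint32_t offset,
	uint32_t *off_within)
{
	struct scatterlist *sg;
	uint32_t consumed = 0;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (consumed + sg->length > offset) {
			*off_within = offset - consumed;
			return sg;
		}
		consumed += sg->length;
	}
	return NULL;	/* offset is past the end of the list */
}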
3802 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
3803 uint32_t srr_rel_offs, int *xmit_type)
3805 int res = 0, rel_offs;
3807 rel_offs = srr_rel_offs - cmd->offset;
3808 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
3809 srr_rel_offs, rel_offs);
3811 *xmit_type = QLA_TGT_XMIT_ALL;
3814 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
3815 "qla_target(%d): SRR rel_offs (%d) < 0",
3816 cmd->vha->vp_idx, rel_offs);
3818 } else if (rel_offs == cmd->bufflen)
3819 *xmit_type = QLA_TGT_XMIT_STATUS;
3820 else if (rel_offs > 0)
3821 res = qlt_set_data_offset(cmd, rel_offs);
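/*
 * Illustrative sketch of the decision made in qlt_srr_adjust_data()
 * above (hypothetical helper): rel_offs is the requested retransmit
 * offset relative to data this command has already sent (cmd->offset).
 * A negative value is rejected, rel_offs == bufflen means only the
 * status was lost, and anything in between additionally trims the SGL
 * via qlt_set_data_offset() before resending.
 */
static inline int qlt_example_srr_xmit_type(int rel_offs, int bufflen)
{
	if (rel_offs < 0)
		return -1;			/* reject the SRR */
	if (rel_offs == bufflen)
		return QLA_TGT_XMIT_STATUS;	/* resend status only */
	return QLA_TGT_XMIT_ALL;		/* resend data and status */
}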
3826 /* No locks, thread context */
3827 static void qlt_handle_srr(struct scsi_qla_host *vha,
3828 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
3830 struct imm_ntfy_from_isp *ntfy =
3831 (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
3832 struct qla_hw_data *ha = vha->hw;
3833 struct qla_tgt_cmd *cmd = sctio->cmd;
3834 struct se_cmd *se_cmd = &cmd->se_cmd;
3835 unsigned long flags;
3836 int xmit_type = 0, resp = 0;
3840 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
3841 srr_ui = ntfy->u.isp24.srr_ui;
3843 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
3848 spin_lock_irqsave(&ha->hardware_lock, flags);
3849 qlt_send_notify_ack(vha, ntfy,
3850 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3851 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3852 xmit_type = QLA_TGT_XMIT_STATUS;
3855 case SRR_IU_DATA_IN:
3856 if (!cmd->sg || !cmd->sg_cnt) {
3857 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
3858 "Unable to process SRR_IU_DATA_IN due to"
3859 " missing cmd->sg, state: %d\n", cmd->state);
3863 if (se_cmd->scsi_status != 0) {
3864 ql_dbg(ql_dbg_tgt, vha, 0xe02a,
3865 "Rejecting SRR_IU_DATA_IN with non GOOD "
3869 cmd->bufflen = se_cmd->data_length;
3871 if (qlt_has_data(cmd)) {
3872 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3874 spin_lock_irqsave(&ha->hardware_lock, flags);
3875 qlt_send_notify_ack(vha, ntfy,
3876 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3877 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3880 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
3881 "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject",
3882 vha->vp_idx, se_cmd->tag,
3883 cmd->se_cmd.scsi_status);
3887 case SRR_IU_DATA_OUT:
3888 if (!cmd->sg || !cmd->sg_cnt) {
3889 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
3890 "Unable to process SRR_IU_DATA_OUT due to"
3891 " missing cmd->sg\n");
3895 if (se_cmd->scsi_status != 0) {
3896 ql_dbg(ql_dbg_tgt, vha, 0xe02b,
3897 "Rejecting SRR_IU_DATA_OUT"
3898 " with non GOOD scsi_status\n");
3901 cmd->bufflen = se_cmd->data_length;
3903 if (qlt_has_data(cmd)) {
3904 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3906 spin_lock_irqsave(&ha->hardware_lock, flags);
3907 qlt_send_notify_ack(vha, ntfy,
3908 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3909 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3910 if (xmit_type & QLA_TGT_XMIT_DATA) {
3911 cmd->cmd_flags |= BIT_8;
3912 qlt_rdy_to_xfer(cmd);
3915 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
3916 "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject",
3917 vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status);
3922 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
3923 "qla_target(%d): Unknown srr_ui value %x",
3924 vha->vp_idx, srr_ui);
3928 /* Transmit response in case of status and data-in cases */
3930 cmd->cmd_flags |= BIT_7;
3931 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
3937 spin_lock_irqsave(&ha->hardware_lock, flags);
3938 qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
3939 NOTIFY_ACK_SRR_FLAGS_REJECT,
3940 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3941 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3942 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3943 cmd->state = QLA_TGT_STATE_DATA_IN;
3946 cmd->cmd_flags |= BIT_9;
3947 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
3949 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3952 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
3953 struct qla_tgt_srr_imm *imm, int ha_locked)
3955 struct qla_hw_data *ha = vha->hw;
3956 unsigned long flags = 0;
3960 spin_lock_irqsave(&ha->hardware_lock, flags);
3963 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
3964 NOTIFY_ACK_SRR_FLAGS_REJECT,
3965 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3966 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3970 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3976 static void qlt_handle_srr_work(struct work_struct *work)
3978 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
3979 struct scsi_qla_host *vha = tgt->vha;
3980 struct qla_tgt_srr_ctio *sctio;
3981 unsigned long flags;
3983 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
3987 spin_lock_irqsave(&tgt->srr_lock, flags);
3988 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
3989 struct qla_tgt_srr_imm *imm, *i, *ti;
3990 struct qla_tgt_cmd *cmd;
3991 struct se_cmd *se_cmd;
3994 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
3996 if (i->srr_id == sctio->srr_id) {
3997 list_del(&i->srr_list_entry);
3999 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
4000 "qla_target(%d): There must be "
4001 "only one IMM SRR per CTIO SRR "
4002 "(IMM SRR %p, id %d, CTIO %p\n",
4003 vha->vp_idx, i, i->srr_id, sctio);
4004 qlt_reject_free_srr_imm(tgt->vha, i, 0);
4010 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
4011 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
4015 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
4016 "Not found matching IMM for SRR CTIO (id %d)\n",
4020 list_del(&sctio->srr_list_entry);
4022 spin_unlock_irqrestore(&tgt->srr_lock, flags);
4026 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
4027 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
4036 se_cmd = &cmd->se_cmd;
4038 cmd->sg_cnt = se_cmd->t_data_nents;
4039 cmd->sg = se_cmd->t_data_sg;
4041 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
4042 "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d",
4043 cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ?
4044 se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset);
4046 qlt_handle_srr(vha, sctio, imm);
4052 spin_unlock_irqrestore(&tgt->srr_lock, flags);
4055 /* ha->hardware_lock supposed to be held on entry */
4056 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
4057 struct imm_ntfy_from_isp *iocb)
4059 struct qla_tgt_srr_imm *imm;
4060 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4061 struct qla_tgt_srr_ctio *sctio;
4065 ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
4068 imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
4070 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
4072 /* IRQ is already OFF */
4073 spin_lock(&tgt->srr_lock);
4074 imm->srr_id = tgt->imm_srr_id;
4075 list_add_tail(&imm->srr_list_entry,
4076 &tgt->srr_imm_list);
4077 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
4078 "IMM NTFY SRR %p added (id %d, ui %x)\n",
4079 imm, imm->srr_id, iocb->u.isp24.srr_ui);
4080 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
4082 list_for_each_entry(sctio, &tgt->srr_ctio_list,
4084 if (sctio->srr_id == imm->srr_id) {
4090 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
4091 "Scheduling srr work\n");
4092 schedule_work(&tgt->srr_work);
4094 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
4095 "qla_target(%d): imm_srr_id "
4096 "== ctio_srr_id (%d), but there is no "
4097 "corresponding SRR CTIO, deleting IMM "
4098 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
4100 list_del(&imm->srr_list_entry);
4104 spin_unlock(&tgt->srr_lock);
4108 spin_unlock(&tgt->srr_lock);
4110 struct qla_tgt_srr_ctio *ts;
4112 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
4113 "qla_target(%d): Unable to allocate SRR IMM "
4114 "entry, SRR request will be rejected\n", vha->vp_idx);
4116 /* IRQ is already OFF */
4117 spin_lock(&tgt->srr_lock);
4118 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
4120 if (sctio->srr_id == tgt->imm_srr_id) {
4121 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
4122 "CTIO SRR %p deleted (id %d)\n",
4123 sctio, sctio->srr_id);
4124 list_del(&sctio->srr_list_entry);
4125 qlt_send_term_exchange(vha, sctio->cmd,
4126 &sctio->cmd->atio, 1);
4130 spin_unlock(&tgt->srr_lock);
4137 qlt_send_notify_ack(vha, iocb, 0, 0, 0,
4138 NOTIFY_ACK_SRR_FLAGS_REJECT,
4139 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4140 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4144 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4146 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
4147 struct imm_ntfy_from_isp *iocb)
4149 struct qla_hw_data *ha = vha->hw;
4150 uint32_t add_flags = 0;
4151 int send_notify_ack = 1;
4154 status = le16_to_cpu(iocb->u.isp2x.status);
4156 case IMM_NTFY_LIP_RESET:
4158 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
4159 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
4160 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
4161 iocb->u.isp24.status_subcode);
4163 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
4164 send_notify_ack = 0;
4168 case IMM_NTFY_LIP_LINK_REINIT:
4170 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4171 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
4172 "qla_target(%d): LINK REINIT (loop %#x, "
4173 "subcode %x)\n", vha->vp_idx,
4174 le16_to_cpu(iocb->u.isp24.nport_handle),
4175 iocb->u.isp24.status_subcode);
4176 if (tgt->link_reinit_iocb_pending) {
4177 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
4180 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
4181 tgt->link_reinit_iocb_pending = 1;
4183 * QLogic requires waiting after LINK REINIT for possible
4184 * PDISC or ADISC ELS commands
4186 send_notify_ack = 0;
4190 case IMM_NTFY_PORT_LOGOUT:
4191 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
4192 "qla_target(%d): Port logout (loop "
4193 "%#x, subcode %x)\n", vha->vp_idx,
4194 le16_to_cpu(iocb->u.isp24.nport_handle),
4195 iocb->u.isp24.status_subcode);
4197 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
4198 send_notify_ack = 0;
4199 /* The sessions will be cleared in the callback, if needed */
4202 case IMM_NTFY_GLBL_TPRLO:
4203 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
4204 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
4205 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
4206 send_notify_ack = 0;
4207 /* The sessions will be cleared in the callback, if needed */
4210 case IMM_NTFY_PORT_CONFIG:
4211 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
4212 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
4214 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
4215 send_notify_ack = 0;
4216 /* The sessions will be cleared in the callback, if needed */
4219 case IMM_NTFY_GLBL_LOGO:
4220 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
4221 "qla_target(%d): Link failure detected\n",
4223 /* I_T nexus loss */
4224 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
4225 send_notify_ack = 0;
4228 case IMM_NTFY_IOCB_OVERFLOW:
4229 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
4230 "qla_target(%d): Cannot provide requested "
4231 "capability (IOCB overflowed the immediate notify "
4232 "resource count)\n", vha->vp_idx);
4235 case IMM_NTFY_ABORT_TASK:
4236 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
4237 "qla_target(%d): Abort Task (S %08x I %#x -> "
4238 "L %#x)\n", vha->vp_idx,
4239 le16_to_cpu(iocb->u.isp2x.seq_id),
4240 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
4241 le16_to_cpu(iocb->u.isp2x.lun));
4242 if (qlt_abort_task(vha, iocb) == 0)
4243 send_notify_ack = 0;
4246 case IMM_NTFY_RESOURCE:
4247 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
4248 "qla_target(%d): Out of resources, host %ld\n",
4249 vha->vp_idx, vha->host_no);
4252 case IMM_NTFY_MSG_RX:
4253 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
4254 "qla_target(%d): Immediate notify task %x\n",
4255 vha->vp_idx, iocb->u.isp2x.task_flags);
4256 if (qlt_handle_task_mgmt(vha, iocb) == 0)
4257 send_notify_ack = 0;
4261 if (qlt_24xx_handle_els(vha, iocb) == 0)
4262 send_notify_ack = 0;
4266 qlt_prepare_srr_imm(vha, iocb);
4267 send_notify_ack = 0;
4271 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
4272 "qla_target(%d): Received unknown immediate "
4273 "notify status %x\n", vha->vp_idx, status);
4277 if (send_notify_ack)
4278 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
4282 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4283 * This function sends busy to ISP 2xxx or 24xx.
4285 static int __qlt_send_busy(struct scsi_qla_host *vha,
4286 struct atio_from_isp *atio, uint16_t status)
4288 struct ctio7_to_24xx *ctio24;
4289 struct qla_hw_data *ha = vha->hw;
4291 struct qla_tgt_sess *sess = NULL;
4293 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4294 atio->u.isp24.fcp_hdr.s_id);
4296 qlt_send_term_exchange(vha, NULL, atio, 1);
4299 /* Sending marker isn't necessary, since we're called from ISR */
4301 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
4303 ql_dbg(ql_dbg_io, vha, 0x3063,
4304 "qla_target(%d): %s failed: unable to allocate "
4305 "request packet", vha->vp_idx, __func__);
4309 pkt->entry_count = 1;
4310 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
4312 ctio24 = (struct ctio7_to_24xx *)pkt;
4313 ctio24->entry_type = CTIO_TYPE7;
4314 ctio24->nport_handle = sess->loop_id;
4315 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
4316 ctio24->vp_index = vha->vp_idx;
4317 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
4318 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
4319 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
4320 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
4321 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
4323 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
4324 CTIO7_FLAGS_DONT_RET_CTIO);
4326 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it
4327 * if explicit confirmation is used.
4329 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
4330 ctio24->u.status1.scsi_status = cpu_to_le16(status);
4331 /* Memory Barrier */
4333 qla2x00_start_iocbs(vha, vha->req);
4338 * This routine is used to allocate a command for either a QFull condition
4339 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go out previously.
4343 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
4344 struct atio_from_isp *atio, uint16_t status, int qfull)
4346 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4347 struct qla_hw_data *ha = vha->hw;
4348 struct qla_tgt_sess *sess;
4349 struct se_session *se_sess;
4350 struct qla_tgt_cmd *cmd;
4353 if (unlikely(tgt->tgt_stop)) {
4354 ql_dbg(ql_dbg_io, vha, 0x300a,
4355 "New command while device %p is shutting down\n", tgt);
4359 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
4360 vha->hw->tgt.num_qfull_cmds_dropped++;
4361 if (vha->hw->tgt.num_qfull_cmds_dropped >
4362 vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
4363 vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
4364 vha->hw->tgt.num_qfull_cmds_dropped;
4366 ql_dbg(ql_dbg_io, vha, 0x3068,
4367 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
4368 vha->vp_idx, __func__,
4369 vha->hw->tgt.num_qfull_cmds_dropped);
4371 qlt_chk_exch_leak_thresh_hold(vha);
4375 sess = ha->tgt.tgt_ops->find_sess_by_s_id
4376 (vha, atio->u.isp24.fcp_hdr.s_id);
4380 se_sess = sess->se_sess;
4382 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
4386 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
4387 if (!cmd) {
4388 ql_dbg(ql_dbg_io, vha, 0x3009,
4389 "qla_target(%d): %s: Allocation of cmd failed\n",
4390 vha->vp_idx, __func__);
4392 vha->hw->tgt.num_qfull_cmds_dropped++;
4393 if (vha->hw->tgt.num_qfull_cmds_dropped >
4394 vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
4395 vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
4396 vha->hw->tgt.num_qfull_cmds_dropped;
4398 qlt_chk_exch_leak_thresh_hold(vha);
4399 return;
4400 }
4402 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
4404 qlt_incr_num_pend_cmds(vha);
4405 INIT_LIST_HEAD(&cmd->cmd_list);
4406 memcpy(&cmd->atio, atio, sizeof(*atio));
4408 cmd->tgt = vha->vha_tgt.qla_tgt;
4410 cmd->reset_count = vha->hw->chip_reset;
4412 if (qfull) {
4413 cmd->q_full = 1;
4415 /* NOTE: borrowing the state field to carry the status */
4416 cmd->state = status;
4417 } else
4418 cmd->term_exchg = 1;
4420 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
4422 vha->hw->tgt.num_qfull_cmds_alloc++;
4423 if (vha->hw->tgt.num_qfull_cmds_alloc >
4424 vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
4425 vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
4426 vha->hw->tgt.num_qfull_cmds_alloc;
4427 }
4429 int
4430 qlt_free_qfull_cmds(struct scsi_qla_host *vha)
4431 {
4432 struct qla_hw_data *ha = vha->hw;
4433 unsigned long flags;
4434 struct qla_tgt_cmd *cmd, *tcmd;
4435 struct list_head free_list;
4438 if (list_empty(&ha->tgt.q_full_list))
4441 INIT_LIST_HEAD(&free_list);
4443 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
4445 if (list_empty(&ha->tgt.q_full_list)) {
4446 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
4447 return 0;
4448 }
4450 list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
4451 if (cmd->q_full)
4452 /* cmd->state is a borrowed field to hold status */
4453 rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
4454 else if (cmd->term_exchg)
4455 rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
4457 if (rc == -ENOMEM)
4458 break;
4460 if (cmd->q_full)
4461 ql_dbg(ql_dbg_io, vha, 0x3006,
4462 "%s: busy sent for ox_id[%04x]\n", __func__,
4463 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
4464 else if (cmd->term_exchg)
4465 ql_dbg(ql_dbg_io, vha, 0x3007,
4466 "%s: Term exchg sent for ox_id[%04x]\n", __func__,
4467 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
4468 else
4469 ql_dbg(ql_dbg_io, vha, 0x3008,
4470 "%s: Unexpected cmd in QFull list %p\n", __func__,
4471 cmd);
4473 list_del(&cmd->cmd_list);
4474 list_add_tail(&cmd->cmd_list, &free_list);
4476 /* piggy back on hardware_lock for protection */
4477 vha->hw->tgt.num_qfull_cmds_alloc--;
4478 }
4479 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
4481 cmd = NULL;
4483 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
4484 list_del(&cmd->cmd_list);
4485 /* This cmd was never sent to TCM. There is no need
4486 * to schedule free or call free_cmd
4487 */
4488 qlt_free_cmd(cmd);
4489 }
4490 return rc;
4491 }
4493 static void
4494 qlt_send_busy(struct scsi_qla_host *vha,
4495 struct atio_from_isp *atio, uint16_t status)
4496 {
4497 int rc = 0;
4499 rc = __qlt_send_busy(vha, atio, status);
4500 if (rc == -ENOMEM)
4501 qlt_alloc_qfull_cmd(vha, atio, status, 1);
4502 }
4504 static int
4505 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
4506 struct atio_from_isp *atio)
4507 {
4508 struct qla_hw_data *ha = vha->hw;
4509 uint16_t status;
4511 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
4512 return 0;
4514 status = temp_sam_status;
4515 qlt_send_busy(vha, atio, status);
4516 return 1;
4517 }
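/*
 * Once ha->tgt.num_pend_cmds crosses Q_FULL_THRESH_HOLD, new ATIOs are
 * answered with temp_sam_status (SAM_STAT_BUSY by default) instead of
 * being queued to the target core.
 */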
4519 /* ha->hardware_lock supposed to be held on entry */
4520 /* called via callback from qla2xxx */
4521 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
4522 struct atio_from_isp *atio)
4524 struct qla_hw_data *ha = vha->hw;
4525 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4528 if (unlikely(tgt == NULL)) {
4529 ql_dbg(ql_dbg_io, vha, 0x3064,
4530 "ATIO pkt, but no tgt (ha %p)", ha);
4534 * In tgt_stop mode we also should allow all requests to pass.
4535 * Otherwise, some commands can stuck.
4538 tgt->irq_cmd_count++;
4540 switch (atio->u.raw.entry_type) {
4542 if (unlikely(atio->u.isp24.exchange_addr ==
4543 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
4544 ql_dbg(ql_dbg_io, vha, 0x3065,
4545 "qla_target(%d): ATIO_TYPE7 "
4546 "received with UNKNOWN exchange address, "
4547 "sending QUEUE_FULL\n", vha->vp_idx);
4548 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
4549 break;
4550 }
4554 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
4555 rc = qlt_chk_qfull_thresh_hold(vha, atio);
4556 if (rc != 0) {
4557 tgt->irq_cmd_count--;
4558 return;
4559 }
4560 rc = qlt_handle_cmd_for_atio(vha, atio);
4561 } else
4562 rc = qlt_handle_task_mgmt(vha, atio);
4564 if (unlikely(rc != 0)) {
4565 if (rc == -ESRCH) {
4566 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
4567 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
4568 #else
4569 qlt_send_term_exchange(vha, NULL, atio, 1);
4570 #endif
4571 } else {
4572 if (tgt->tgt_stop) {
4573 ql_dbg(ql_dbg_tgt, vha, 0xe059,
4574 "qla_target: Unable to send "
4575 "command to target for req, "
4578 ql_dbg(ql_dbg_tgt, vha, 0xe05a,
4579 "qla_target(%d): Unable to send "
4580 "command to target, sending BUSY "
4581 "status.\n", vha->vp_idx);
4582 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
4583 }
4584 }
4585 }
4586 break;
4588 case IMMED_NOTIFY_TYPE:
4589 {
4590 if (unlikely(atio->u.isp2x.entry_status != 0)) {
4591 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
4592 "qla_target(%d): Received ATIO packet %x "
4593 "with error status %x\n", vha->vp_idx,
4594 atio->u.raw.entry_type,
4595 atio->u.isp2x.entry_status);
4596 break;
4597 }
4598 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
4599 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
4600 break;
4601 }
4603 default:
4604 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
4605 "qla_target(%d): Received unknown ATIO atio "
4606 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
4610 tgt->irq_cmd_count--;
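/*
 * irq_cmd_count brackets the processing of every packet so the
 * target-stop path can tell when the last in-flight ATIO or response
 * has drained before tearing the tgt structure down.
 */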
4613 /* ha->hardware_lock supposed to be held on entry */
4614 /* called via callback from qla2xxx */
4615 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
4617 struct qla_hw_data *ha = vha->hw;
4618 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4620 if (unlikely(tgt == NULL)) {
4621 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
4622 "qla_target(%d): Response pkt %x received, but no "
4623 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
4628 * In tgt_stop mode we also should allow all requests to pass.
4629 * Otherwise, some commands can stuck.
4632 tgt->irq_cmd_count++;
4634 switch (pkt->entry_type) {
4638 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
4639 qlt_do_ctio_completion(vha, entry->handle,
4640 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
4641 entry);
4642 break;
4643 }
4645 case ACCEPT_TGT_IO_TYPE:
4646 {
4647 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
4648 int rc;
4649 if (atio->u.isp2x.status !=
4650 cpu_to_le16(ATIO_CDB_VALID)) {
4651 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
4652 "qla_target(%d): ATIO with error "
4653 "status %x received\n", vha->vp_idx,
4654 le16_to_cpu(atio->u.isp2x.status));
4655 break;
4656 }
4658 rc = qlt_chk_qfull_thresh_hold(vha, atio);
4659 if (rc != 0) {
4660 tgt->irq_cmd_count--;
4661 return;
4662 }
4664 rc = qlt_handle_cmd_for_atio(vha, atio);
4665 if (unlikely(rc != 0)) {
4666 if (rc == -ESRCH) {
4667 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
4668 qlt_send_busy(vha, atio, 0);
4669 #else
4670 qlt_send_term_exchange(vha, NULL, atio, 1);
4671 #endif
4672 } else {
4673 if (tgt->tgt_stop) {
4674 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
4675 "qla_target: Unable to send "
4676 "command to target, sending TERM "
4677 "EXCHANGE for rsp\n");
4678 qlt_send_term_exchange(vha, NULL,
4679 atio, 1);
4680 } else {
4681 ql_dbg(ql_dbg_tgt, vha, 0xe060,
4682 "qla_target(%d): Unable to send "
4683 "command to target, sending BUSY "
4684 "status\n", vha->vp_idx);
4685 qlt_send_busy(vha, atio, 0);
4686 }
4687 }
4688 }
4689 }
4690 break;
4692 case CONTINUE_TGT_IO_TYPE:
4693 {
4694 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
4695 qlt_do_ctio_completion(vha, entry->handle,
4696 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
4697 entry);
4698 break;
4699 }
4701 case CTIO_A64_TYPE:
4702 {
4703 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
4704 qlt_do_ctio_completion(vha, entry->handle,
4705 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
4706 entry);
4707 break;
4708 }
4710 case IMMED_NOTIFY_TYPE:
4711 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
4712 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
4713 break;
4715 case NOTIFY_ACK_TYPE:
4716 if (tgt->notify_ack_expected > 0) {
4717 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
4718 ql_dbg(ql_dbg_tgt, vha, 0xe036,
4719 "NOTIFY_ACK seq %08x status %x\n",
4720 le16_to_cpu(entry->u.isp2x.seq_id),
4721 le16_to_cpu(entry->u.isp2x.status));
4722 tgt->notify_ack_expected--;
4723 if (entry->u.isp2x.status !=
4724 cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
4725 ql_dbg(ql_dbg_tgt, vha, 0xe061,
4726 "qla_target(%d): NOTIFY_ACK "
4727 "failed %x\n", vha->vp_idx,
4728 le16_to_cpu(entry->u.isp2x.status));
4729 }
4730 } else {
4731 ql_dbg(ql_dbg_tgt, vha, 0xe062,
4732 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
4733 vha->vp_idx);
4734 }
4735 break;
4737 case ABTS_RECV_24XX:
4738 ql_dbg(ql_dbg_tgt, vha, 0xe037,
4739 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
4740 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
4741 break;
4743 case ABTS_RESP_24XX:
4744 if (tgt->abts_resp_expected > 0) {
4745 struct abts_resp_from_24xx_fw *entry =
4746 (struct abts_resp_from_24xx_fw *)pkt;
4747 ql_dbg(ql_dbg_tgt, vha, 0xe038,
4748 "ABTS_RESP_24XX: compl_status %x\n",
4749 entry->compl_status);
4750 tgt->abts_resp_expected--;
4751 if (le16_to_cpu(entry->compl_status) !=
4752 ABTS_RESP_COMPL_SUCCESS) {
4753 if ((entry->error_subcode1 == 0x1E) &&
4754 (entry->error_subcode2 == 0)) {
4755 /*
4756 * We've got a race here: the aborted
4757 * exchange was not terminated, i.e.
4758 * the response for the aborted command
4759 * was sent between the abort request
4760 * being received and being processed.
4761 * Unfortunately, the firmware has a
4762 * silly requirement that all aborted
4763 * exchanges must be explicitly
4764 * terminated, otherwise it refuses to
4765 * send responses for the abort
4766 * requests. So, we have to
4767 * (re)terminate the exchange and retry
4768 * the abort response.
4769 */
4770 qlt_24xx_retry_term_exchange(vha,
4771 entry);
4772 } else {
4773 ql_dbg(ql_dbg_tgt, vha, 0xe063,
4774 "qla_target(%d): ABTS_RESP_24XX "
4775 "failed %x (subcode %x:%x)",
4776 vha->vp_idx, entry->compl_status,
4777 entry->error_subcode1,
4778 entry->error_subcode2);
4779 }
4780 } else {
4781 ql_dbg(ql_dbg_tgt, vha, 0xe064,
4782 "qla_target(%d): Unexpected ABTS_RESP_24XX "
4783 "received\n", vha->vp_idx);
4784 }
4785 break;
4787 default:
4788 ql_dbg(ql_dbg_tgt, vha, 0xe065,
4789 "qla_target(%d): Received unknown response pkt "
4790 "type %x\n", vha->vp_idx, pkt->entry_type);
4794 tgt->irq_cmd_count--;
4798 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
4800 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
4801 uint16_t *mailbox)
4802 {
4803 struct qla_hw_data *ha = vha->hw;
4804 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4805 int login_code;
4807 if (!ha->tgt.tgt_ops)
4808 return;
4810 if (unlikely(tgt == NULL)) {
4811 ql_dbg(ql_dbg_tgt, vha, 0xe03a,
4812 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
4813 return;
4814 }
4816 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
4817 IS_QLA2100(ha))
4818 return;
4819 /*
4820 * In tgt_stop mode we also should allow all requests to pass.
4821 * Otherwise, some commands can get stuck.
4822 */
4824 tgt->irq_cmd_count++;
4826 switch (code) {
4827 case MBA_RESET: /* Reset */
4828 case MBA_SYSTEM_ERR: /* System Error */
4829 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
4830 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
4831 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
4832 "qla_target(%d): System error async event %#x "
4833 "occurred", vha->vp_idx, code);
4835 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
4836 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4841 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
4842 "qla_target(%d): Async LOOP_UP occurred "
4843 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
4844 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4845 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4846 if (tgt->link_reinit_iocb_pending) {
4847 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
4848 0, 0, 0, 0, 0, 0);
4849 tgt->link_reinit_iocb_pending = 0;
4850 }
4851 break;
4852 }
4854 case MBA_LIP_OCCURRED:
4855 case MBA_LOOP_DOWN:
4856 case MBA_LIP_RESET:
4857 case MBA_RSCN_UPDATE:
4858 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
4859 "qla_target(%d): Async event %#x occurred "
4860 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
4861 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4862 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4863 break;
4865 case MBA_PORT_UPDATE:
4866 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
4867 "qla_target(%d): Port update async event %#x "
4868 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
4869 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
4870 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4871 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4873 login_code = le16_to_cpu(mailbox[2]);
4874 if (login_code == 0x4)
4875 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
4876 "Async MB 2: Got PLOGI Complete\n");
4877 else if (login_code == 0x7)
4878 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
4879 "Async MB 2: Port Logged Out\n");
4886 tgt->irq_cmd_count--;
4889 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4895 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
4897 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
4898 "qla_target(%d): Allocation of tmp FC port failed",
4903 fcport->loop_id = loop_id;
4905 rc = qla2x00_get_port_database(vha, fcport, 0);
4906 if (rc != QLA_SUCCESS) {
4907 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
4908 "qla_target(%d): Failed to retrieve fcport "
4909 "information -- get_port_database() returned %x "
4910 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
4918 /* Must be called under tgt_mutex */
4919 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
4922 struct qla_tgt_sess *sess = NULL;
4923 fc_port_t *fcport = NULL;
4924 int rc, global_resets;
4925 uint16_t loop_id = 0;
4927 retry:
4928 global_resets =
4929 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
4931 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
4932 if (rc != 0) {
4933 if ((s_id[0] == 0xFF) &&
4934 (s_id[1] == 0xFC)) {
4935 /*
4936 * This is the Domain Controller, so it should be
4937 * OK to drop SCSI commands from it.
4938 */
4939 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
4940 "Unable to find initiator with S_ID %x:%x:%x",
4941 s_id[0], s_id[1], s_id[2]);
4942 } else
4943 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
4944 "qla_target(%d): Unable to find "
4945 "initiator with S_ID %x:%x:%x",
4946 vha->vp_idx, s_id[0], s_id[1],
4947 s_id[2]);
4948 return NULL;
4949 }
4951 fcport = qlt_get_port_database(vha, loop_id);
4952 if (!fcport)
4953 return NULL;
4955 if (global_resets !=
4956 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
4957 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
4958 "qla_target(%d): global reset during session discovery "
4959 "(counter was %d, new %d), retrying", vha->vp_idx,
4961 atomic_read(&vha->vha_tgt.
4962 qla_tgt->tgt_global_resets_count));
4966 sess = qlt_create_sess(vha, fcport, true);
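/*
 * qlt_abort_work() and qlt_tmr_work() below run from qlt_sess_work_fn()
 * in process context: they look up (or create, via qlt_make_local_sess())
 * the session for the initiator's S_ID and then replay the deferred ABTS
 * or task management request against it.
 */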
4972 static void qlt_abort_work(struct qla_tgt *tgt,
4973 struct qla_tgt_sess_work_param *prm)
4975 struct scsi_qla_host *vha = tgt->vha;
4976 struct qla_hw_data *ha = vha->hw;
4977 struct qla_tgt_sess *sess = NULL;
4978 unsigned long flags;
4979 uint32_t be_s_id;
4980 uint8_t s_id[3];
4981 int rc;
4983 spin_lock_irqsave(&ha->hardware_lock, flags);
4985 if (tgt->tgt_stop)
4986 goto out_term;
4988 s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
4989 s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
4990 s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
4992 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4993 (unsigned char *)&be_s_id);
4994 if (!sess) {
4995 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4997 mutex_lock(&vha->vha_tgt.tgt_mutex);
4998 sess = qlt_make_local_sess(vha, s_id);
4999 /* sess has got an extra creation ref */
5000 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5002 spin_lock_irqsave(&ha->hardware_lock, flags);
5003 if (!sess)
5004 goto out_term;
5005 } else {
5006 kref_get(&sess->se_sess->sess_kref);
5007 }
5009 if (tgt->tgt_stop)
5010 goto out_term;
5012 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
5013 if (rc != 0)
5014 goto out_term;
5016 ha->tgt.tgt_ops->put_sess(sess);
5017 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5018 return;
5020 out_term:
5021 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
5022 if (sess)
5023 ha->tgt.tgt_ops->put_sess(sess);
5024 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5025 }
5027 static void qlt_tmr_work(struct qla_tgt *tgt,
5028 struct qla_tgt_sess_work_param *prm)
5030 struct atio_from_isp *a = &prm->tm_iocb2;
5031 struct scsi_qla_host *vha = tgt->vha;
5032 struct qla_hw_data *ha = vha->hw;
5033 struct qla_tgt_sess *sess = NULL;
5034 unsigned long flags;
5035 uint8_t *s_id = NULL; /* to hide compiler warnings */
5036 int rc;
5037 uint32_t lun, unpacked_lun;
5038 int fn;
5039 void *iocb;
5041 spin_lock_irqsave(&ha->hardware_lock, flags);
5043 if (tgt->tgt_stop)
5044 goto out_term;
5046 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
5047 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
5048 if (!sess) {
5049 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5051 mutex_lock(&vha->vha_tgt.tgt_mutex);
5052 sess = qlt_make_local_sess(vha, s_id);
5053 /* sess has got an extra creation ref */
5054 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5056 spin_lock_irqsave(&ha->hardware_lock, flags);
5057 if (!sess)
5058 goto out_term;
5059 } else {
5060 kref_get(&sess->se_sess->sess_kref);
5061 }
5063 iocb = a;
5064 lun = a->u.isp24.fcp_cmnd.lun;
5065 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
5066 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
5068 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
5069 if (rc != 0)
5070 goto out_term;
5072 ha->tgt.tgt_ops->put_sess(sess);
5073 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5074 return;
5076 out_term:
5077 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
5078 if (sess)
5079 ha->tgt.tgt_ops->put_sess(sess);
5080 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5081 }
5083 static void qlt_sess_work_fn(struct work_struct *work)
5085 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
5086 struct scsi_qla_host *vha = tgt->vha;
5087 unsigned long flags;
5089 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
5091 spin_lock_irqsave(&tgt->sess_work_lock, flags);
5092 while (!list_empty(&tgt->sess_works_list)) {
5093 struct qla_tgt_sess_work_param *prm = list_entry(
5094 tgt->sess_works_list.next, typeof(*prm),
5095 sess_works_list_entry);
5097 /*
5098 * This work can be scheduled on several CPUs at a time, so we
5099 * must delete the entry to eliminate double processing
5100 */
5101 list_del(&prm->sess_works_list_entry);
5103 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
5105 switch (prm->type) {
5106 case QLA_TGT_SESS_WORK_ABORT:
5107 qlt_abort_work(tgt, prm);
5108 break;
5109 case QLA_TGT_SESS_WORK_TM:
5110 qlt_tmr_work(tgt, prm);
5111 break;
5112 default:
5113 BUG_ON(1);
5114 break;
5115 }
5116 kfree(prm);
5117 spin_lock_irqsave(&tgt->sess_work_lock, flags);
5118 }
5121 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
5122 }
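/*
 * qlt_sess_work_fn() above is the consumer side: the interrupt path
 * queues a qla_tgt_sess_work_param for requests that arrive before the
 * session exists, and this work function replays them in process
 * context, where tgt_mutex may be taken to create the session.
 */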
5124 /* Must be called under tgt_host_action_mutex */
5125 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
5127 struct qla_tgt *tgt;
5129 if (!QLA_TGT_MODE_ENABLED())
5130 return 0;
5132 if (!IS_TGT_MODE_CAPABLE(ha)) {
5133 ql_log(ql_log_warn, base_vha, 0xe070,
5134 "This adapter does not support target mode.\n");
5135 return 0;
5136 }
5138 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
5139 "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
5141 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
5143 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
5144 if (!tgt) {
5145 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
5146 "Unable to allocate struct qla_tgt\n");
5147 return -ENOMEM;
5148 }
5150 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
5151 base_vha->host->hostt->supported_mode |= MODE_TARGET;
5154 tgt->vha = base_vha;
5155 init_waitqueue_head(&tgt->waitQ);
5156 INIT_LIST_HEAD(&tgt->sess_list);
5157 INIT_LIST_HEAD(&tgt->del_sess_list);
5158 INIT_DELAYED_WORK(&tgt->sess_del_work,
5159 (void (*)(struct work_struct *))qlt_del_sess_work_fn);
5160 spin_lock_init(&tgt->sess_work_lock);
5161 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
5162 INIT_LIST_HEAD(&tgt->sess_works_list);
5163 spin_lock_init(&tgt->srr_lock);
5164 INIT_LIST_HEAD(&tgt->srr_ctio_list);
5165 INIT_LIST_HEAD(&tgt->srr_imm_list);
5166 INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
5167 atomic_set(&tgt->tgt_global_resets_count, 0);
5169 base_vha->vha_tgt.qla_tgt = tgt;
5171 ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
5172 "qla_target(%d): using 64 Bit PCI addressing",
5173 base_vha->vp_idx);
5174 tgt->tgt_enable_64bit_addr = 1;
5175 /* 3 is reserved */
5176 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
5177 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
5178 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
5180 if (base_vha->fc_vport)
5181 return 0;
5183 mutex_lock(&qla_tgt_mutex);
5184 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
5185 mutex_unlock(&qla_tgt_mutex);
5187 return 0;
5188 }
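/*
 * Targets registered above on qla_tgt_glist are matched by WWPN in
 * qlt_lport_register() when tcm_qla2xxx binds a fabric lport.
 */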
5190 /* Must be called under tgt_host_action_mutex */
5191 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
5192 {
5193 if (!vha->vha_tgt.qla_tgt)
5194 return 0;
5196 if (vha->fc_vport) {
5197 qlt_release(vha->vha_tgt.qla_tgt);
5198 return 0;
5199 }
5201 /* free left over qfull cmds */
5202 qlt_init_term_exchange(vha);
5204 mutex_lock(&qla_tgt_mutex);
5205 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
5206 mutex_unlock(&qla_tgt_mutex);
5208 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
5209 vha->host_no, vha);
5210 qlt_release(vha->vha_tgt.qla_tgt);
5212 return 0;
5213 }
5215 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
5216 unsigned char *b)
5217 {
5218 int i;
5220 pr_debug("qla2xxx HW vha->node_name: ");
5221 for (i = 0; i < WWN_SIZE; i++)
5222 pr_debug("%02x ", vha->node_name[i]);
5223 pr_debug("\n");
5224 pr_debug("qla2xxx HW vha->port_name: ");
5225 for (i = 0; i < WWN_SIZE; i++)
5226 pr_debug("%02x ", vha->port_name[i]);
5227 pr_debug("\n");
5229 pr_debug("qla2xxx passed configfs WWPN: ");
5230 put_unaligned_be64(wwpn, b);
5231 for (i = 0; i < WWN_SIZE; i++)
5232 pr_debug("%02x ", b[i]);
5233 pr_debug("\n");
5234 }
5236 /**
5237 * qlt_lport_register - register lport with external module
5238 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
5239 * @phys_wwpn: physical FC target WWPN passed in from configfs
5240 * @npiv_wwpn: NPIV WWPN (zero for a physical lport)
5241 * @npiv_wwnn: NPIV WWNN (zero for a physical lport)
5242 * @callback: lport initialization callback for tcm_qla2xxx code
5243 */
5244 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
5245 u64 npiv_wwpn, u64 npiv_wwnn,
5246 int (*callback)(struct scsi_qla_host *, void *, u64, u64))
5247 {
5248 struct qla_tgt *tgt;
5249 struct scsi_qla_host *vha;
5250 struct qla_hw_data *ha;
5251 struct Scsi_Host *host;
5252 unsigned long flags;
5253 int rc;
5254 u8 b[WWN_SIZE];
5256 mutex_lock(&qla_tgt_mutex);
5257 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
5258 vha = tgt->vha;
5259 ha = vha->hw;
5261 host = vha->host;
5262 if (!host)
5263 continue;
5265 if (!(host->hostt->supported_mode & MODE_TARGET))
5266 continue;
5268 spin_lock_irqsave(&ha->hardware_lock, flags);
5269 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
5270 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
5272 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5275 if (tgt->tgt_stop) {
5276 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
5278 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5281 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5283 if (!scsi_host_get(host)) {
5284 ql_dbg(ql_dbg_tgt, vha, 0xe068,
5285 "Unable to scsi_host_get() for"
5286 " qla2xxx scsi_host\n");
5289 qlt_lport_dump(vha, phys_wwpn, b);
5291 if (memcmp(vha->port_name, b, WWN_SIZE)) {
5292 scsi_host_put(host);
5295 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
5297 scsi_host_put(host);
5299 mutex_unlock(&qla_tgt_mutex);
5302 mutex_unlock(&qla_tgt_mutex);
5306 EXPORT_SYMBOL(qlt_lport_register);
5308 /**
5309 * qlt_lport_deregister - deregister lport
5311 * @vha: Registered scsi_qla_host pointer
5312 */
5313 void qlt_lport_deregister(struct scsi_qla_host *vha)
5314 {
5315 struct qla_hw_data *ha = vha->hw;
5316 struct Scsi_Host *sh = vha->host;
5317 /*
5318 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
5319 */
5320 vha->vha_tgt.target_lport_ptr = NULL;
5321 ha->tgt.tgt_ops = NULL;
5322 /*
5323 * Release the Scsi_Host reference for the underlying qla2xxx host
5324 */
5325 scsi_host_put(sh);
5326 }
5327 EXPORT_SYMBOL(qlt_lport_deregister);
5329 /* Must be called under HW lock */
5330 static void qlt_set_mode(struct scsi_qla_host *vha)
5331 {
5332 struct qla_hw_data *ha = vha->hw;
5334 switch (ql2x_ini_mode) {
5335 case QLA2XXX_INI_MODE_DISABLED:
5336 case QLA2XXX_INI_MODE_EXCLUSIVE:
5337 vha->host->active_mode = MODE_TARGET;
5338 break;
5339 case QLA2XXX_INI_MODE_ENABLED:
5340 vha->host->active_mode |= MODE_TARGET;
5341 break;
5342 default:
5343 break;
5344 }
5346 if (ha->tgt.ini_mode_force_reverse)
5347 qla_reverse_ini_mode(vha);
5348 }
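/*
 * Note the asymmetry between the modes: "exclusive" switches the port to
 * pure target mode here and back to pure initiator mode in
 * qlt_clear_mode(), while "enabled" only toggles the MODE_TARGET bit and
 * leaves initiator mode running.
 */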
5350 /* Must be called under HW lock */
5351 static void qlt_clear_mode(struct scsi_qla_host *vha)
5352 {
5353 struct qla_hw_data *ha = vha->hw;
5355 switch (ql2x_ini_mode) {
5356 case QLA2XXX_INI_MODE_DISABLED:
5357 vha->host->active_mode = MODE_UNKNOWN;
5358 break;
5359 case QLA2XXX_INI_MODE_EXCLUSIVE:
5360 vha->host->active_mode = MODE_INITIATOR;
5361 break;
5362 case QLA2XXX_INI_MODE_ENABLED:
5363 vha->host->active_mode &= ~MODE_TARGET;
5364 break;
5365 default:
5366 break;
5367 }
5369 if (ha->tgt.ini_mode_force_reverse)
5370 qla_reverse_ini_mode(vha);
5371 }
5373 /*
5374 * qlt_enable_vha - NO LOCK HELD
5376 * host_reset, bring up w/ Target Mode Enabled
5377 */
5378 void
5379 qlt_enable_vha(struct scsi_qla_host *vha)
5380 {
5381 struct qla_hw_data *ha = vha->hw;
5382 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5383 unsigned long flags;
5384 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5386 if (!tgt) {
5387 ql_dbg(ql_dbg_tgt, vha, 0xe069,
5388 "Unable to locate qla_tgt pointer from"
5389 " struct qla_hw_data\n");
5390 dump_stack();
5391 return;
5392 }
5394 spin_lock_irqsave(&ha->hardware_lock, flags);
5395 tgt->tgt_stopped = 0;
5396 qlt_set_mode(vha);
5397 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5399 if (vha->vp_idx) {
5400 qla24xx_disable_vp(vha);
5401 qla24xx_enable_vp(vha);
5402 } else {
5403 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
5404 qla2xxx_wake_dpc(base_vha);
5405 qla2x00_wait_for_hba_online(base_vha);
5406 }
5407 }
5408 EXPORT_SYMBOL(qlt_enable_vha);
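/*
 * Both qlt_enable_vha() and qlt_disable_vha() apply the new mode by
 * scheduling a chip reset (ISP_ABORT_NEEDED) on the base port, or by
 * bouncing the virtual port when called for an NPIV vport.
 */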
5410 /*
5411 * qlt_disable_vha - NO LOCK HELD
5413 * Disable Target Mode and reset the adapter
5414 */
5415 static void qlt_disable_vha(struct scsi_qla_host *vha)
5416 {
5417 struct qla_hw_data *ha = vha->hw;
5418 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5419 unsigned long flags;
5421 if (!tgt) {
5422 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
5423 "Unable to locate qla_tgt pointer from"
5424 " struct qla_hw_data\n");
5425 dump_stack();
5426 return;
5427 }
5429 spin_lock_irqsave(&ha->hardware_lock, flags);
5430 qlt_clear_mode(vha);
5431 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5433 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5434 qla2xxx_wake_dpc(vha);
5435 qla2x00_wait_for_hba_online(vha);
5436 }
5438 /*
5439 * Called from qla_init.c:qla24xx_vport_create() context to set up
5440 * the target mode specific struct scsi_qla_host and struct qla_hw_data
5441 * members
5442 */
5443 void
5444 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
5445 {
5446 if (!qla_tgt_mode_enabled(vha))
5447 return;
5449 vha->vha_tgt.qla_tgt = NULL;
5451 mutex_init(&vha->vha_tgt.tgt_mutex);
5452 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
5454 qlt_clear_mode(vha);
5456 /*
5457 * NOTE: Currently the value is kept the same for <24xx and
5458 * >=24xx ISPs. If it is necessary to change it,
5459 * the check should be added for specific ISPs,
5460 * assigning the value appropriately.
5461 */
5462 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
5464 qlt_add_target(ha, vha);
5465 }
5467 void
5468 qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
5469 {
5470 /*
5471 * FC-4 Feature bit 0 indicates target functionality to the name server.
5472 */
5473 if (qla_tgt_mode_enabled(vha)) {
5474 if (qla_ini_mode_enabled(vha))
5475 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
5477 ct_req->req.rff_id.fc4_feature = BIT_0;
5478 } else if (qla_ini_mode_enabled(vha)) {
5479 ct_req->req.rff_id.fc4_feature = BIT_1;
5480 }
5481 }
5483 /*
5484 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
5485 * @vha: SCSI driver HA context
5487 * Beginning of ATIO ring has initialization control block already built
5488 * by nvram config routine.
5489 */
5492 void
5493 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
5494 {
5495 struct qla_hw_data *ha = vha->hw;
5496 uint16_t cnt;
5497 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
5499 if (!qla_tgt_mode_enabled(vha))
5500 return;
5502 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
5503 pkt->u.raw.signature = ATIO_PROCESSED;
5504 pkt++;
5505 }
5507 }
5509 /*
5510 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
5511 * @vha: SCSI driver HA context
5512 */
5513 void
5514 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
5515 {
5516 struct qla_hw_data *ha = vha->hw;
5517 struct atio_from_isp *pkt;
5518 int cnt, i;
5520 if (!vha->flags.online)
5521 return;
5523 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
5524 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
5525 cnt = pkt->u.raw.entry_count;
5527 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
5529 for (i = 0; i < cnt; i++) {
5530 ha->tgt.atio_ring_index++;
5531 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
5532 ha->tgt.atio_ring_index = 0;
5533 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
5534 } else
5535 ha->tgt.atio_ring_ptr++;
5537 pkt->u.raw.signature = ATIO_PROCESSED;
5538 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
5539 }
5540 wmb();
5541 }
5543 /* Adjust ring index */
5544 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
5545 }
5547 void
5548 qlt_24xx_config_rings(struct scsi_qla_host *vha)
5549 {
5550 struct qla_hw_data *ha = vha->hw;
5551 if (!QLA_TGT_MODE_ENABLED())
5552 return;
5554 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
5555 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
5556 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
5558 if (IS_ATIO_MSIX_CAPABLE(ha)) {
5559 struct qla_msix_entry *msix = &ha->msix_entries[2];
5560 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
5562 icb->msix_atio = cpu_to_le16(msix->entry);
5563 ql_dbg(ql_dbg_init, vha, 0xf072,
5564 "Registering ICB vector 0x%x for atio que.\n",
5570 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
5572 struct qla_hw_data *ha = vha->hw;
5574 if (qla_tgt_mode_enabled(vha)) {
5575 if (!ha->tgt.saved_set) {
5576 /* We save only once */
5577 ha->tgt.saved_exchange_count = nv->exchange_count;
5578 ha->tgt.saved_firmware_options_1 =
5579 nv->firmware_options_1;
5580 ha->tgt.saved_firmware_options_2 =
5581 nv->firmware_options_2;
5582 ha->tgt.saved_firmware_options_3 =
5583 nv->firmware_options_3;
5584 ha->tgt.saved_set = 1;
5585 }
5587 nv->exchange_count = cpu_to_le16(0xFFFF);
5589 /* Enable target mode */
5590 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
5592 /* Disable ini mode, if requested */
5593 if (!qla_ini_mode_enabled(vha))
5594 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
5596 /* Disable Full Login after LIP */
5597 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
5598 /* Enable initial LIP */
5599 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
5600 if (ql2xtgt_tape_enable)
5601 /* Enable FC Tape support */
5602 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
5604 /* Disable FC Tape support */
5605 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
5607 /* Disable Full Login after LIP */
5608 nv->host_p &= cpu_to_le32(~BIT_10);
5609 /* Enable target PRLI control */
5610 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
5611 } else {
5612 if (ha->tgt.saved_set) {
5613 nv->exchange_count = ha->tgt.saved_exchange_count;
5614 nv->firmware_options_1 =
5615 ha->tgt.saved_firmware_options_1;
5616 nv->firmware_options_2 =
5617 ha->tgt.saved_firmware_options_2;
5618 nv->firmware_options_3 =
5619 ha->tgt.saved_firmware_options_3;
5620 }
5621 return;
5622 }
5624 /* out-of-order frames reassembly */
5625 nv->firmware_options_3 |= BIT_6|BIT_9;
5627 if (ha->tgt.enable_class_2) {
5628 if (vha->flags.init_done)
5629 fc_host_supported_classes(vha->host) =
5630 FC_COS_CLASS2 | FC_COS_CLASS3;
5632 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
5633 } else {
5634 if (vha->flags.init_done)
5635 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
5637 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
5638 }
5639 }
5641 void
5642 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
5643 struct init_cb_24xx *icb)
5644 {
5645 struct qla_hw_data *ha = vha->hw;
5647 if (ha->tgt.node_name_set) {
5648 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
5649 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
5650 }
5651 }
5653 void
5654 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
5655 {
5656 struct qla_hw_data *ha = vha->hw;
5658 if (!QLA_TGT_MODE_ENABLED())
5659 return;
5661 if (qla_tgt_mode_enabled(vha)) {
5662 if (!ha->tgt.saved_set) {
5663 /* We save only once */
5664 ha->tgt.saved_exchange_count = nv->exchange_count;
5665 ha->tgt.saved_firmware_options_1 =
5666 nv->firmware_options_1;
5667 ha->tgt.saved_firmware_options_2 =
5668 nv->firmware_options_2;
5669 ha->tgt.saved_firmware_options_3 =
5670 nv->firmware_options_3;
5671 ha->tgt.saved_set = 1;
5672 }
5674 nv->exchange_count = cpu_to_le16(0xFFFF);
5676 /* Enable target mode */
5677 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
5679 /* Disable ini mode, if requested */
5680 if (!qla_ini_mode_enabled(vha))
5681 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
5683 /* Disable Full Login after LIP */
5684 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
5685 /* Enable initial LIP */
5686 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
5687 if (ql2xtgt_tape_enable)
5688 /* Enable FC tape support */
5689 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
5691 /* Disable FC tape support */
5692 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
5694 /* Disable Full Login after LIP */
5695 nv->host_p &= cpu_to_le32(~BIT_10);
5696 /* Enable target PRLI control */
5697 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
5698 } else {
5699 if (ha->tgt.saved_set) {
5700 nv->exchange_count = ha->tgt.saved_exchange_count;
5701 nv->firmware_options_1 =
5702 ha->tgt.saved_firmware_options_1;
5703 nv->firmware_options_2 =
5704 ha->tgt.saved_firmware_options_2;
5705 nv->firmware_options_3 =
5706 ha->tgt.saved_firmware_options_3;
5707 }
5708 return;
5709 }
5711 /* out-of-order frames reassembly */
5712 nv->firmware_options_3 |= BIT_6|BIT_9;
5714 if (ha->tgt.enable_class_2) {
5715 if (vha->flags.init_done)
5716 fc_host_supported_classes(vha->host) =
5717 FC_COS_CLASS2 | FC_COS_CLASS3;
5719 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
5720 } else {
5721 if (vha->flags.init_done)
5722 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
5724 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
5725 }
5726 }
5728 void
5729 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
5730 struct init_cb_81xx *icb)
5731 {
5732 struct qla_hw_data *ha = vha->hw;
5734 if (!QLA_TGT_MODE_ENABLED())
5735 return;
5737 if (ha->tgt.node_name_set) {
5738 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
5739 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
5740 }
5741 }
5743 void
5744 qlt_83xx_iospace_config(struct qla_hw_data *ha)
5745 {
5746 if (!QLA_TGT_MODE_ENABLED())
5747 return;
5749 ha->msix_count += 1; /* For ATIO Q */
5750 }
5752 void
5753 qlt_24xx_process_response_error(struct scsi_qla_host *vha,
5754 struct sts_entry_24xx *pkt)
5755 {
5756 switch (pkt->entry_type) {
5757 case ABTS_RECV_24XX:
5758 case ABTS_RESP_24XX:
5759 case CTIO_TYPE7:
5760 case NOTIFY_ACK_TYPE:
5761 case CTIO_CRC2:
5762 return;
5763 default:
5764 return;
5765 }
5766 }
5768 void
5769 qlt_modify_vp_config(struct scsi_qla_host *vha,
5770 struct vp_config_entry_24xx *vpmod)
5772 if (qla_tgt_mode_enabled(vha))
5773 vpmod->options_idx1 &= ~BIT_5;
5774 /* Disable ini mode, if requested */
5775 if (!qla_ini_mode_enabled(vha))
5776 vpmod->options_idx1 &= ~BIT_4;
5777 }
5779 void
5780 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
5781 {
5782 if (!QLA_TGT_MODE_ENABLED())
5783 return;
5785 if (ha->mqenable || IS_QLA83XX(ha)) {
5786 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
5787 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
5788 } else {
5789 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
5790 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
5791 }
5793 mutex_init(&base_vha->vha_tgt.tgt_mutex);
5794 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
5795 qlt_clear_mode(base_vha);
5796 }
5798 irqreturn_t
5799 qla83xx_msix_atio_q(int irq, void *dev_id)
5800 {
5801 struct rsp_que *rsp;
5802 scsi_qla_host_t *vha;
5803 struct qla_hw_data *ha;
5804 unsigned long flags;
5806 rsp = (struct rsp_que *) dev_id;
5807 ha = rsp->hw;
5808 vha = pci_get_drvdata(ha->pdev);
5810 spin_lock_irqsave(&ha->hardware_lock, flags);
5812 qlt_24xx_process_atio_queue(vha);
5813 qla24xx_process_response_queue(vha, rsp);
5815 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5817 return IRQ_HANDLED;
5818 }
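/*
 * qla83xx_msix_atio_q() above is bound to the dedicated ATIO MSI-X
 * vector registered via icb->msix_atio in qlt_24xx_config_rings(); it
 * drains the ATIO and response queues under hardware_lock.
 */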
5820 int
5821 qlt_mem_alloc(struct qla_hw_data *ha)
5822 {
5823 if (!QLA_TGT_MODE_ENABLED())
5824 return 0;
5826 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
5827 MAX_MULTI_ID_FABRIC, GFP_KERNEL);
5828 if (!ha->tgt.tgt_vp_map)
5829 return -ENOMEM;
5831 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
5832 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
5833 &ha->tgt.atio_dma, GFP_KERNEL);
5834 if (!ha->tgt.atio_ring) {
5835 kfree(ha->tgt.tgt_vp_map);
5836 return -ENOMEM;
5837 }
5838 return 0;
5839 }
5841 void
5842 qlt_mem_free(struct qla_hw_data *ha)
5843 {
5844 if (!QLA_TGT_MODE_ENABLED())
5845 return;
5847 if (ha->tgt.atio_ring) {
5848 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
5849 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
5850 ha->tgt.atio_dma);
5851 }
5852 kfree(ha->tgt.tgt_vp_map);
5853 }
5855 /* vport_slock to be held by the caller */
5856 void
5857 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
5858 {
5859 if (!QLA_TGT_MODE_ENABLED())
5860 return;
5862 switch (cmd) {
5863 case SET_VP_IDX:
5864 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
5865 break;
5866 case SET_AL_PA:
5867 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
5868 break;
5869 case RESET_VP_IDX:
5870 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
5871 break;
5872 case RESET_AL_PA:
5873 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
5874 break;
5875 }
5876 }
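/*
 * tgt_vp_map provides the reverse lookups used when routing incoming
 * ATIOs: vp_idx -> scsi_qla_host and AL_PA -> vp_idx. The vport code
 * updates it here under vport_slock as virtual ports come and go.
 */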
5878 static int __init qlt_parse_ini_mode(void)
5880 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
5881 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
5882 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
5883 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
5884 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
5885 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
5886 else
5887 return false;
5889 return true;
5890 }
5892 int __init qlt_init(void)
5893 {
5894 int ret;
5896 if (!qlt_parse_ini_mode()) {
5897 ql_log(ql_log_fatal, NULL, 0xe06b,
5898 "qlt_parse_ini_mode() failed\n");
5902 if (!QLA_TGT_MODE_ENABLED())
5905 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
5906 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
5907 qla_tgt_mgmt_cmd), 0, NULL);
5908 if (!qla_tgt_mgmt_cmd_cachep) {
5909 ql_log(ql_log_fatal, NULL, 0xe06d,
5910 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
5914 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
5915 mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
5916 if (!qla_tgt_mgmt_cmd_mempool) {
5917 ql_log(ql_log_fatal, NULL, 0xe06e,
5918 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
5920 goto out_mgmt_cmd_cachep;
5923 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
5925 ql_log(ql_log_fatal, NULL, 0xe06f,
5926 "alloc_workqueue for qla_tgt_wq failed\n");
5928 goto out_cmd_mempool;
5931 * Return 1 to signal that initiator-mode is being disabled
5933 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
5936 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
5937 out_mgmt_cmd_cachep:
5938 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
5939 return ret;
5940 }
5942 void qlt_exit(void)
5943 {
5944 if (!QLA_TGT_MODE_ENABLED())
5945 return;
5947 destroy_workqueue(qla_tgt_wq);
5948 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
5949 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
5950 }