/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
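
/*
 * tpg_lock protects the module-global tpg_list of all registered
 * struct se_portal_group; both are private to this file and are only
 * touched from core_tpg_register() and core_tpg_deregister() below.
 */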
/*	core_clear_initiator_node_from_tpg():
 *
 *
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
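
/*
 * Note the drop/re-acquire of nacl->device_list_lock in the loop above:
 * core_disable_device_list_for_node() takes the same per-ACL lock
 * internally, so it cannot be called with the lock held.
 */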
/*	__core_tpg_get_initiator_node_acl():
 *
 *	spin_lock_irq(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	spin_unlock_irq(&tpg->acl_node_lock);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
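
/*
 * Demo-mode mapping summary: every ACTIVE LUN on the TPG is mapped into
 * the new dynamic ACL, read-write unless the fabric's
 * tpg_check_demo_mode_write_protect() asks for write protection, in
 * which case only non-TYPE_DISK devices (e.g. optical) keep read-write.
 */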
/*	core_set_queue_depth_for_node():
 *
 *
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}
void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kzalloc(n * sizeof(void *), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}
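
/*
 * array_zalloc()/array_free() build a two-level ("ragged") table: one
 * kzalloc'd pointer array plus one kzalloc'd element per slot, so the
 * TRANSPORT_MAX_LUNS_PER_TPG-sized tables below avoid a single large
 * high-order allocation.
 */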
/*	core_create_device_list_for_node():
 *
 *
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}
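
/*
 * core_tpg_check_initiator_node_acl() below is the demo-mode login path:
 * it returns an existing ACL, or generates a dynamic one when
 * TFO->tpg_check_demo_mode() allows it.  A fabric module would typically
 * call it while establishing a session, roughly (illustrative sketch,
 * fabric-specific details assumed):
 *
 *	se_sess->se_node_acl =
 *		core_tpg_check_initiator_node_acl(se_tpg, initiator_name);
 *	if (!se_sess->se_node_acl)
 *		<reject the login>;
 */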
/*	core_tpg_check_initiator_node_acl()
 *
 *
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
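
/*
 * core_tpg_add_initiator_node_acl() below is reached from the fabric's
 * configfs ->fabric_make_nodeacl() path to create an explicit ACL.  An
 * existing dynamic (demo-mode) ACL for the same initiator is claimed in
 * place by clearing dynamic_node_acl rather than allocating a new entry.
 */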
/*	core_tpg_add_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
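
/*
 * ACL teardown below: sessions still attached to the ACL are collected
 * onto a local list under nacl_sess_lock, each is shut down through
 * TFO->shutdown_session(), and the final target_put_nacl() reference is
 * waited on via acl_free_comp before the device list is torn down.
 */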
/*	core_tpg_del_initiator_node_acl():
 *
 *
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
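
/*
 * core_tpg_set_initiator_node_queue_depth() semantics: with an active
 * session and force=0 the change is refused (-EEXIST); with force=1 the
 * session is shut down and closed so the initiator logs back in with
 * the new queue depth.
 */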
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
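
/*
 * Virtual LUN0 setup: every normal TPG exports a read-only LUN 0 backed
 * by the global g_lun0_dev (set up in core_dev_setup_virtual_lun0()),
 * so an initiator scan always finds a LUN 0 to respond.
 */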
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}
static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			array_free(se_tpg->tpg_lun_list,
				   TRANSPORT_MAX_LUNS_PER_TPG);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
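
/*
 * Typical fabric usage of core_tpg_register() (illustrative sketch; the
 * my_fabric_ops/my_tpg names and surrounding structures are
 * fabric-specific):
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *				my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */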
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
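
/*
 * The remaining helpers pair up: core_tpg_pre_addlun()/core_tpg_post_addlun()
 * validate and then export+activate a LUN, while core_tpg_pre_dellun()/
 * core_tpg_post_dellun() validate and then quiesce+unexport it; the
 * lun_status transitions happen under tpg_lun_lock in both post paths.
 */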
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}
struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}