/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <linux/uio_driver.h>
#include <net/genetlink.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented in userspace.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it from breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

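/*
 * For example, mb->cmd_head lives in the shared area and so could be
 * scribbled on by userspace; every read of it below is reduced modulo
 * cmdr_size before it is used as a ring offset.
 */
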
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

#define CMDR_SIZE (16 * 4096)
#define DATA_SIZE (257 * 4096)

#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)

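/*
 * Resulting layout of the single mmap()ed UIO region, as set up in
 * tcmu_configure_device() below:
 *
 *   [0, CMDR_OFF)                the struct tcmu_mailbox
 *   [CMDR_OFF, CMDR_SIZE)        command ring (udev->cmdr_size bytes)
 *   [CMDR_SIZE, TCMU_RING_SIZE)  data area (udev->data_off/data_size)
 */
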
static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

struct tcmu_dev {
	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
	unsigned long flags;

	struct uio_info uio_info;

	struct tcmu_mailbox *mb_addr;
	size_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data ring from start of mb */
	size_t data_off;
	size_t data_size;
	/* Ring head + tail values. */
	/* Must add data_off and mb_addr to get the address */
	size_t data_head;
	size_t data_tail;

	wait_queue_head_t wait_cmdr;
	/* TODO should this be a mutex? */
	spinlock_t cmdr_lock;

	struct idr commands;
	spinlock_t commands_lock;

	struct timer_list timeout;

	char dev_config[TCMU_CONFIG_LEN];
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;

	uint16_t cmd_id;

	/* Can't use se_cmd->data_length when cleaning up expired cmds, because
	   if the cmd has been completed then accessing se_cmd is off limits */
	size_t data_length;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};

static struct kmem_cache *tcmu_cmd_cache;

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 1,
	.maxattr = TCMU_ATTR_MAX,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
};

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int cmd_id;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;
	tcmu_cmd->data_length = se_cmd->data_length;

	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&udev->commands_lock);
	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
		USHRT_MAX, GFP_NOWAIT);
	spin_unlock_irq(&udev->commands_lock);
	idr_preload_end();

	if (cmd_id < 0) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}
	tcmu_cmd->cmd_id = cmd_id;

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK;

	size = round_up(size+offset, PAGE_SIZE);
	vaddr -= offset;

	while (size) {
		flush_dcache_page(virt_to_page(vaddr));
		size -= PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

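/*
 * Worked example: with size = 8, head = 2, tail = 6 the used span wraps,
 * so spc_used() = 8 + (2 - 6) = 4 and spc_free() = 8 - 4 - 1 = 3. The
 * "- 1" is the byte kept unused so head == tail always means empty.
 */
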
static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

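/*
 * UPDATE_HEAD advances a ring index with wraparound: e.g. index 65000
 * plus 1000 used on a 65536-byte ring stores 464. smp_store_release()
 * orders the entry/data writes before it so the other side never sees
 * the new index ahead of the contents it covers.
 */
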
/*
 * We can't queue a command until we have space available on the cmd ring
 * *and* space available on the data ring.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	size_t space;
	u32 cmd_head;
	size_t cmd_needed;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a PAD plus
	 * the original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
	if (space < data_needed) {
		pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
			 udev->data_tail, udev->data_size);
		return false;
	}

	return true;
}

static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	int i;
	struct scatterlist *sg;
	struct iovec *iov;
	int iov_cnt = 0;
	uint32_t cmd_head;
	uint64_t cdb_off;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		return -EINVAL;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * iovs = sgl_nents+1, for end-of-ring case, plus another 1
	 * b/c size == offsetof one-past-element.
	 */
	base_command_size = max(offsetof(struct tcmu_cmd_entry,
					 req.iov[se_cmd->t_data_nents + 2]),
				sizeof(struct tcmu_cmd_entry));

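	/*
	 * e.g. with t_data_nents = 2 this reserves space up to iov[3]: one
	 * iov per sg entry, a spare because one sg may be split in two when
	 * it straddles the end of the data area, and one more so the
	 * offsetof() above lands one past the last usable element.
	 */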
	command_size = base_command_size
		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	spin_lock_irq(&udev->cmdr_lock);

	mb = udev->mb_addr;
	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	if ((command_size > (udev->cmdr_size / 2))
	    || tcmu_cmd->data_length > (udev->data_size - 1))
		pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
			"cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
			udev->cmdr_size, udev->data_size);

	while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
		int ret;
		DEFINE_WAIT(__wait);

		prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);

		pr_debug("sleeping for ring space\n");
		spin_unlock_irq(&udev->cmdr_lock);
		ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
		finish_wait(&udev->wait_cmdr, &__wait);
		if (!ret) {
			pr_warn("tcmu: command timed out\n");
			return -ETIMEDOUT;
		}

		spin_lock_irq(&udev->cmdr_lock);

		/* We dropped cmdr_lock, cmd_head is stale */
		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	}

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		entry = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_flush_dcache_range(entry, sizeof(*entry));
		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
		entry->hdr.cmd_id = 0; /* not used for PAD */
		entry->hdr.kflags = 0;
		entry->hdr.uflags = 0;

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	entry = (void *) mb + CMDR_OFF + cmd_head;
	tcmu_flush_dcache_range(entry, sizeof(*entry));
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
	entry->hdr.kflags = 0;
	entry->hdr.uflags = 0;

	/*
	 * Fix up iovecs, and handle if allocation in data ring wrapped.
	 */
	iov = &entry->req.iov[0];
	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
		size_t copy_bytes = min((size_t)sg->length,
					head_to_end(udev->data_head, udev->data_size));
		void *from = kmap_atomic(sg_page(sg)) + sg->offset;
		void *to = (void *) mb + udev->data_off + udev->data_head;

		/* Copy data from the sgl into the data area */
		if (se_cmd->data_direction == DMA_TO_DEVICE) {
			memcpy(to, from, copy_bytes);
			tcmu_flush_dcache_range(to, copy_bytes);
		}

		/* Even iov_base is relative to mb_addr */
		iov->iov_len = copy_bytes;
		iov->iov_base = (void __user *) udev->data_off +
						udev->data_head;
		iov_cnt++;
		iov++;

		UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);

		/* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
		if (sg->length != copy_bytes) {
			from += copy_bytes;
			copy_bytes = sg->length - copy_bytes;

			iov->iov_len = copy_bytes;
			iov->iov_base = (void __user *) udev->data_off +
							udev->data_head;

			if (se_cmd->data_direction == DMA_TO_DEVICE) {
				to = (void *) mb + udev->data_off + udev->data_head;
				memcpy(to, from, copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}

			iov_cnt++;
			iov++;

			UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
		}

		kunmap_atomic(from);
	}
	entry->req.iov_cnt = iov_cnt;
	entry->req.iov_bidi_cnt = 0;
	entry->req.iov_dif_cnt = 0;

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, sizeof(*entry));

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	spin_unlock_irq(&udev->cmdr_lock);

	/* TODO: only if FLUSH and FUA? */
	uio_event_notify(&udev->uio_info);

	mod_timer(&udev->timeout,
		round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));

	return 0;
}

static int tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int ret;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return -ENOMEM;

	ret = tcmu_queue_cmd_ring(tcmu_cmd);
	if (ret < 0) {
		pr_err("TCMU: Could not queue command\n");
		spin_lock_irq(&udev->commands_lock);
		idr_remove(&udev->commands, tcmu_cmd->cmd_id);
		spin_unlock_irq(&udev->commands_lock);

		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
	}

	return ret;
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		/* cmd has been completed already from timeout, just reclaim data
		   ring space */
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
		return;
	}

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		transport_generic_request_failure(cmd->se_cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
		cmd->se_cmd = NULL;
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return;
	}

	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
			       se_cmd->scsi_sense_length);

		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
	}
	else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg;
		int i;

		/* It'd be easier to look at entry's iovec again, but UAM */
		for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
			size_t copy_bytes;
			void *to;
			void *from;

			copy_bytes = min((size_t)sg->length,
					head_to_end(udev->data_tail, udev->data_size));

			to = kmap_atomic(sg_page(sg)) + sg->offset;
			WARN_ON(sg->length + sg->offset > PAGE_SIZE);
			from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to, from, copy_bytes);

			UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);

			/* Uh oh, wrapped the data buffer for this sg's data */
			if (sg->length != copy_bytes) {
				from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
				WARN_ON(udev->data_tail);
				to += copy_bytes;

				copy_bytes = sg->length - copy_bytes;

				tcmu_flush_dcache_range(from, copy_bytes);
				memcpy(to, from, copy_bytes);

				UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
			}

			kunmap_atomic(to);
		}

	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
	} else {
		pr_warn("TCMU: data direction was %d!\n", se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
	cmd->se_cmd = NULL;

	kmem_cache_free(tcmu_cmd_cache, cmd);
}

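/*
 * On the command ring the kernel is the producer (mb->cmd_head) and
 * userspace the consumer (mb->cmd_tail); cmdr_last_cleaned below trails
 * cmd_tail and tracks how far completed entries have been reclaimed.
 */
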
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	unsigned long flags;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	spin_lock_irqsave(&udev->cmdr_lock, flags);

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		spin_lock(&udev->commands_lock);
		cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
		if (cmd)
			idr_remove(&udev->commands, cmd->cmd_id);
		spin_unlock(&udev->commands_lock);

		if (!cmd) {
			pr_err("cmd_id not found, ring is broken\n");
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head)
		del_timer(&udev->timeout); /* no more pending cmds */

	spin_unlock_irqrestore(&udev->cmdr_lock, flags);

	wake_up(&udev->wait_cmdr);

	return handled;
}

static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	/* Skip commands whose deadline has not yet passed */
	if (!time_after(jiffies, cmd->deadline))
		return 0;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
	cmd->se_cmd = NULL;

	kmem_cache_free(tcmu_cmd_cache, cmd);

	return 0;
}

static void tcmu_device_timedout(unsigned long data)
{
	struct tcmu_dev *udev = (struct tcmu_dev *)data;
	unsigned long flags;
	int handled;

	handled = tcmu_handle_completions(udev);

	pr_warn("%d completions handled from timeout\n", handled);

	spin_lock_irqsave(&udev->commands_lock, flags);
	idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
	spin_unlock_irqrestore(&udev->commands_lock, flags);

	/*
	 * We don't need to wakeup threads on wait_cmdr since they have their
	 * own timeout.
	 */
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;

	init_waitqueue_head(&udev->wait_cmdr);
	spin_lock_init(&udev->cmdr_lock);

	idr_init(&udev->commands);
	spin_lock_init(&udev->commands_lock);

	setup_timer(&udev->timeout, tcmu_device_timedout,
		(unsigned long)udev);

	return &udev->se_dev;
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);

	tcmu_handle_completions(tcmu_dev);

	return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	addr = (void *)(unsigned long)info->mem[mi].addr + offset;
	if (info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(addr);
	else
		page = vmalloc_to_page(addr);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
	.fault = tcmu_vma_fault,
};

static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
		return -EINVAL;

	return 0;
}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	return 0;
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

	return 0;
}

static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
	if (ret < 0)
		goto free_skb;

	genlmsg_end(skb, msg_header);

	ret = genlmsg_multicast(&tcmu_genl_family, skb, 0,
				TCMU_MCGRP_CONFIG, GFP_KERNEL);

	/* We don't care if no one is listening */
	if (ret == -ESRCH)
		ret = 0;

	return ret;
free_skb:
	nlmsg_free(skb);
	return ret;
}

static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	size_t size;
	size_t used;
	int ret = 0;
	char *str;

	info = &udev->uio_info;

	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
			udev->dev_config);
	size += 1; /* for \0 */
	str = kmalloc(size, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);

	if (udev->dev_config[0])
		snprintf(str + used, size - used, "/%s", udev->dev_config);

	info->name = str;

	udev->mb_addr = vzalloc(TCMU_RING_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;

	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);

	info->version = xstr(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t) udev->mb_addr;
	info->mem[0].size = TCMU_RING_SIZE;
	info->mem[0].memtype = UIO_MEM_VIRTUAL;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* Other attributes can be configured in userspace */
	dev->dev_attrib.hw_block_size = 512;
	dev->dev_attrib.hw_max_sectors = 128;
	dev->dev_attrib.hw_queue_depth = 128;

	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
				 udev->uio_info.uio_dev->minor);
	if (ret)
		goto err_netlink;

	return 0;

err_netlink:
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
err_vzalloc:
	kfree(info->name);

	return ret;
}

static int tcmu_check_pending_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;
	return -EINVAL;
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	int i;

	del_timer_sync(&udev->timeout);

	vfree(udev->mb_addr);

	/* Upper layer should drain all requests before calling this */
	spin_lock_irq(&udev->commands_lock);
	i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
	idr_destroy(&udev->commands);
	spin_unlock_irq(&udev->commands_lock);
	WARN_ON(i);

	/* Device was configured */
	if (udev->uio_info.uio_dev) {
		tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
				   udev->uio_info.uio_dev->minor);

		uio_unregister_device(&udev->uio_info);
		kfree(udev->uio_info.name);
	}

	kfree(udev->name);
	kfree(udev);
}

enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%u"},
	{Opt_hw_block_size, "hw_block_size=%u"},
	{Opt_err, NULL}
};

static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts, *arg_p;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_ul;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoul() failed for dev_size=\n");
			break;
		case Opt_hw_block_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_ul);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for hw_block_size=\n");
				break;
			}
			if (!tmp_ul) {
				pr_err("hw_block_size must be nonzero\n");
				break;
			}
			dev->dev_attrib.hw_block_size = tmp_ul;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

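/*
 * A sketch of the parameter string this parser accepts (values here are
 * only illustrative; dev_config is opaque to the kernel and is given
 * meaning by the userspace handler):
 *
 *   dev_config=foo/bar,dev_size=2147483648,hw_block_size=512
 */
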
static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);

	return bl;
}

static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_pass_op(struct se_cmd *se_cmd)
{
	int ret = tcmu_queue_cmd(se_cmd);

	if (ret != 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	else
		return TCM_NO_SENSE;
}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_pass_op);
}

DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);

DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
TB_DEV_ATTR_RO(tcmu, hw_block_size);

DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
TB_DEV_ATTR_RO(tcmu, hw_max_sectors);

DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
TB_DEV_ATTR_RO(tcmu, hw_queue_depth);

static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
	&tcmu_dev_attrib_hw_pi_prot_type.attr,
	&tcmu_dev_attrib_hw_block_size.attr,
	&tcmu_dev_attrib_hw_max_sectors.attr,
	&tcmu_dev_attrib_hw_queue_depth.attr,
	NULL,
};

static struct se_subsystem_api tcmu_template = {
	.name = "user",
	.inquiry_prod = "USER",
	.inquiry_rev = TCMU_VERSION,
	.owner = THIS_MODULE,
	.transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
	.attach_hba = tcmu_attach_hba,
	.detach_hba = tcmu_detach_hba,
	.alloc_device = tcmu_alloc_device,
	.configure_device = tcmu_configure_device,
	.free_device = tcmu_free_device,
	.parse_cdb = tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type = sbc_get_device_type,
	.get_blocks = tcmu_get_blocks,
};

static int __init tcmu_module_init(void)
{
	struct target_backend_cits *tbc = &tcmu_template.tb_cits;
	int ret;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0)
		goto out_unreg_device;

	target_core_setup_sub_cits(&tcmu_template);
	tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;

	ret = transport_subsystem_register(&tcmu_template);
	if (ret)
		goto out_unreg_genl;

	return 0;

out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

static void __exit tcmu_module_exit(void)
{
	transport_subsystem_release(&tcmu_template);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);