/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>

#include "iodev.h"

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */
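
/*
 * For reference, a minimal userspace sketch of wiring an irqfd.  This is
 * illustrative only: vm_fd (a VM fd from KVM_CREATE_VM) and the GSI value
 * are assumptions of the example; KVM_IRQFD and struct kvm_irqfd are the
 * actual UAPI.
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd irqfd = { .fd = efd, .gsi = 24 };
 *
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *
 *	uint64_t one = 1;
 *	write(efd, &one, sizeof(one));	// injects GSI 24 into the guest
 */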
/*
 * Resampling irqfds are a special variety of irqfds used to emulate
 * level triggered interrupts.  The interrupt is asserted on eventfd
 * trigger.  On acknowledgement through the irq ack notifier, the
 * interrupt is de-asserted and userspace is notified through the
 * resamplefd.  All resamplers on the same gsi are de-asserted
 * together, so we don't need to track the state of each individual
 * user.  We can also therefore share the same irq source ID.
 */
struct _irqfd_resampler {
	struct kvm *kvm;
	/*
	 * List of resampling struct _irqfd objects sharing this gsi.
	 * RCU list modified under kvm->irqfds.resampler_lock
	 */
	struct list_head list;
	struct kvm_irq_ack_notifier notifier;
	/*
	 * Entry in list of kvm->irqfd.resampler_list.  Use for sharing
	 * resamplers among irqfds on the same gsi.
	 * Accessed and modified under kvm->irqfds.resampler_lock
	 */
	struct list_head link;
};
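
/*
 * A minimal userspace sketch of the resample flow (illustrative only;
 * vm_fd, both eventfds and the GSI are assumptions of the example):
 *
 *	struct kvm_irqfd irqfd = {
 *		.fd         = trigger_efd,
 *		.resamplefd = resample_efd,
 *		.gsi        = 24,
 *		.flags      = KVM_IRQFD_FLAG_RESAMPLE,
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *
 *	uint64_t cnt = 1;
 *	write(trigger_efd, &cnt, 8);	// assert the level-triggered IRQ
 *	read(resample_efd, &cnt, 8);	// guest acked: re-check the device
 *					// and write again if still pending
 */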
struct _irqfd {
	/* Used for MSI fast-path */
	struct kvm *kvm;
	wait_queue_t wait;
	/* Update side is protected by irqfds.lock */
	struct kvm_kernel_irq_routing_entry irq_entry;
	seqcount_t irq_entry_sc;
	/* Used for level IRQ fast-path */
	int gsi;
	struct work_struct inject;
	/* The resampler used by this irqfd (resampler-only) */
	struct _irqfd_resampler *resampler;
	/* Eventfd notified on resample (resampler-only) */
	struct eventfd_ctx *resamplefd;
	/* Entry in list of irqfds for a resampler (resampler-only) */
	struct list_head resampler_link;
	/* Used for setup/shutdown */
	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};
static struct workqueue_struct *irqfd_cleanup_wq;

static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}
/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct _irqfd_resampler *resampler;
	struct kvm *kvm;
	struct _irqfd *irqfd;
	int idx;

	resampler = container_of(kian, struct _irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}
static void
irqfd_resampler_shutdown(struct _irqfd *irqfd)
{
	struct _irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}
/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed.
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources.
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}
/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (irq.type == KVM_IRQ_ROUTING_MSI)
			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					false);
		else
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold.
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}
/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
			 struct kvm_irq_routing_table *irq_rt)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int i, n_entries;

	n_entries = kvm_irq_map_gsi(entries, irq_rt, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	irqfd->irq_entry.type = 0;

	e = entries;
	for (i = 0; i < n_entries; ++i, ++e) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			irqfd->irq_entry = *e;
	}

	write_seqcount_end(&irqfd->irq_entry_sc);
}
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_irq_routing_table *irq_rt;
	struct _irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct _irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	irq_rt = rcu_dereference_protected(kvm->irq_routing,
					   lockdep_is_held(&kvm->irqfds.lock));
	irqfd_update(kvm, irqfd, irq_rt);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the POLLHUP.
	 */
	fdput(f);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}
#endif
void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
/*
 * Shut down any irqfds that match fd+gsi.
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}
/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct _irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}
/*
 * Change irq_routing and irqfd.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm,
			    struct kvm_irq_routing_table *irq_rt)
{
	struct _irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	rcu_assign_pointer(kvm->irq_routing, irq_rt);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd, irq_rt);

	spin_unlock_irq(&kvm->irqfds.lock);
}
/*
 * Create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances.  We need our own isolated
 * single-thread queue to prevent deadlock against flushing the normal
 * work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif
/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * Userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
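
/*
 * A minimal userspace sketch of a virtio-style doorbell (illustrative
 * only; vm_fd, efd, the address and the queue index are assumptions of
 * the example, while KVM_IOEVENTFD and struct kvm_ioeventfd are the
 * actual UAPI):
 *
 *	struct kvm_ioeventfd io = {
 *		.addr      = 0xfe003000,	// hypothetical MMIO doorbell
 *		.len       = 2,
 *		.fd        = efd,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *		.datamatch = 0,			// queue index to match
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &io);
 *
 * A guest write of the matching 16-bit value to that address now signals
 * efd in the kernel, with no heavyweight exit to userspace.
 */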
struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};
static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}
/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
		const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}
/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};
/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}
static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}
static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus              bus_idx;
	struct _ioeventfd        *p;
	struct eventfd_ctx       *eventfd;
	int                       ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len &&
	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
			   KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
		ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
					      p->addr, 0, &p->dev);
		if (ret < 0)
			goto register_fail;
	}

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

register_fail:
	kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
unlock_fail:
	mutex_unlock(&kvm->slots_lock);
fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}
static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus              bus_idx;
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	int                       ret = -ENOENT;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		if (!p->length)
			kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
						  &p->dev);
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}