/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

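/*
 * Host-global state: the timecounter backing kvm_phys_timer_read(), the
 * workqueue on which soft-timer expiry work runs, and the host PPI number
 * of the virtual timer.
 */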
static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;

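/*
 * Read the raw physical counter through the arch timer's timecounter.
 * Guest virtual time is derived from this value by subtracting the
 * per-VM offset (kvm->arch.timer.cntvoff).
 */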
static cycle_t kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
        return timer->armed;
}

/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
        timer->armed = true;
        hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS);
}

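/*
 * Cancel the soft timer and synchronously flush any queued expiry work,
 * so that the work function cannot run after this returns.
 */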
static void timer_disarm(struct arch_timer_cpu *timer)
{
        if (timer_is_armed(timer)) {
                hrtimer_cancel(&timer->timer);
                cancel_work_sync(&timer->expired);
                timer->armed = false;
        }
}

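/*
 * Record that the physical interrupt is active and inject the mapped
 * virtual interrupt into the vgic; the active state is propagated to the
 * physical distributor in kvm_timer_flush_hwstate().
 */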
static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
{
        int ret;
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        kvm_vgic_set_phys_irq_active(timer->map, true);
        ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
                                         timer->map,
                                         timer->irq->level);
        WARN_ON(ret);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

        /*
         * We disable the timer in the world switch and let it be
         * handled by kvm_timer_sync_hwstate(). Getting a timer
         * interrupt at this point is a sure sign of some major
         * breakage.
         */
        pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
        return IRQ_HANDLED;
}

/*
 * Work function for handling the backup timer that we schedule when a vcpu is
 * no longer running, but had a timer programmed to fire in the future.
 */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
        vcpu->arch.timer_cpu.armed = false;

        /*
         * If the vcpu is blocked we want to wake it up so that it will see
         * the timer has expired when entering the guest.
         */
        kvm_vcpu_kick(vcpu);
}

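/*
 * The hrtimer callback runs in hard interrupt context, so it only queues
 * the expiry work; the actual vcpu kick happens in
 * kvm_timer_inject_irq_work().
 */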
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;

        timer = container_of(hrt, struct arch_timer_cpu, timer);
        queue_work(wqueue, &timer->expired);
        return HRTIMER_NORESTART;
}

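/*
 * Returns true if the guest timer output should be asserted: the timer must
 * be enabled and unmasked in CNTV_CTL, the interrupt must not already be
 * active, and the compare value must be in the past relative to guest
 * virtual time (physical count minus CNTVOFF).
 */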
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        cycle_t cval, now;

        if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
            !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) ||
            kvm_vgic_get_phys_irq_active(timer->map))
                return false;

        cval = timer->cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

        return cval <= now;
}

/**
 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
 * @vcpu: The vcpu pointer
 *
 * Disarm any pending soft timers, since the world-switch code will write the
 * virtual timer state back to the physical CPU.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        bool phys_active;
        int ret;

        /*
         * We're about to run this vcpu again, so there is no need to
         * keep the background timer running, as we're about to
         * populate the CPU timer again.
         */
        timer_disarm(timer);

        /*
         * If the timer expired while we were not scheduled, now is the time
         * to inject it.
         */
        if (kvm_timer_should_fire(vcpu))
                kvm_timer_inject_irq(vcpu);

        /*
         * We keep track of whether the edge-triggered interrupt has been
         * signalled to the vgic/guest. If so, we mark the interrupt as
         * active on the physical distributor, so that the timer cannot
         * raise a physical interrupt while we run the guest, which would
         * prevent forward VCPU progress.
         */
        phys_active = kvm_vgic_get_phys_irq_active(timer->map);

        ret = irq_set_irqchip_state(timer->map->irq,
                                    IRQCHIP_STATE_ACTIVE,
                                    phys_active);
        WARN_ON(ret);
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer was armed and either schedule a corresponding
 * soft timer or inject directly if already expired.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        cycle_t cval, now;
        u64 ns;

        BUG_ON(timer_is_armed(timer));

        if (kvm_timer_should_fire(vcpu)) {
                /*
                 * Timer has already expired while we were not
                 * looking. Inject the interrupt and carry on.
                 */
                kvm_timer_inject_irq(vcpu);
                return;
        }

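        /*
         * The timer hasn't fired yet; convert the remaining cycles to
         * nanoseconds and arm a soft timer to cover the blocked period.
         */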
        cval = timer->cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

        ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
                                 &timecounter->frac);
        timer_arm(timer, ns);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                         const struct kvm_irq_level *irq)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct irq_phys_map *map;

        /*
         * The vcpu timer irq number cannot be determined in
         * kvm_timer_vcpu_init() because it is called long before
         * kvm_vcpu_set_target(). To handle this, we determine the
         * vcpu timer irq number when the vcpu is reset.
         */
        timer->irq = irq;

        /*
         * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
         * and to 0 for ARMv7.  We provide an implementation that always
         * resets the timer to be disabled and unmasked and is compliant with
         * the ARMv7 architecture.
         */
        timer->cntv_ctl = 0;

        /*
         * Tell the VGIC that the virtual interrupt is tied to a
         * physical interrupt. We do that once per VCPU.
         */
        map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq);
        if (WARN_ON(IS_ERR(map)))
                return PTR_ERR(map);

        timer->map = map;
        return 0;
}

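/*
 * Per-vcpu init: set up the expiry work and the soft timer used while the
 * vcpu is blocked. The timer interrupt itself is only wired up at vcpu
 * reset time (see kvm_timer_vcpu_reset() above).
 */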
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
        hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->timer.function = kvm_timer_expire;
}

static void kvm_timer_init_interrupt(void *info)
{
        enable_percpu_irq(host_vtimer_irq, 0);
}

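/*
 * Userspace register access. Writing KVM_REG_ARM_TIMER_CNT sets the guest's
 * view of the virtual counter by adjusting CNTVOFF, since the guest reads
 * CNTVCT as (physical count - CNTVOFF).
 */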
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                timer->cntv_ctl = value;
                break;
        case KVM_REG_ARM_TIMER_CNT:
                vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                timer->cntv_cval = value;
                break;
        default:
                return -1;
        }
        return 0;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return timer->cntv_ctl;
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
                return timer->cntv_cval;
        }
        return (u64)-1;
}

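/*
 * Hotplug notifier: the vtimer interrupt is a per-cpu PPI, so it has to be
 * enabled on each CPU as it comes online and disabled again when it dies.
 */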
static int kvm_timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *cpu)
{
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
                kvm_timer_init_interrupt(NULL);
                break;
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                disable_percpu_irq(host_vtimer_irq);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block kvm_timer_cpu_nb = {
        .notifier_call = kvm_timer_cpu_notify,
};

static const struct of_device_id arch_timer_of_match[] = {
        { .compatible   = "arm,armv7-timer",    },
        { .compatible   = "arm,armv8-timer",    },
        {},
};

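/*
 * Global init: grab the arch timer timecounter, find the timer DT node,
 * map its third interrupt (index 2, which the arch timer DT binding
 * defines as the virtual timer PPI), request it as a per-cpu IRQ, and set
 * up the CPU notifier and the expiry workqueue.
 */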
int kvm_timer_hyp_init(void)
{
        struct device_node *np;
        unsigned int ppi;
        int err;

        timecounter = arch_timer_get_timecounter();
        if (!timecounter)
                return -ENODEV;

        np = of_find_matching_node(NULL, arch_timer_of_match);
        if (!np) {
                kvm_err("kvm_arch_timer: can't find DT node\n");
                return -ENODEV;
        }

        ppi = irq_of_parse_and_map(np, 2);
        if (!ppi) {
                kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
                err = -EINVAL;
                goto out;
        }

        err = request_percpu_irq(ppi, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
                kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
                        ppi, err);
                goto out;
        }

        host_vtimer_irq = ppi;

        err = __register_cpu_notifier(&kvm_timer_cpu_nb);
        if (err) {
                kvm_err("Cannot register timer CPU notifier\n");
                goto out_free;
        }

        wqueue = create_singlethread_workqueue("kvm_arch_timer");
        if (!wqueue) {
                err = -ENOMEM;
                goto out_free;
        }

        kvm_info("%s IRQ%d\n", np->name, ppi);
        on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

        goto out;
out_free:
        free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
        of_node_put(np);
        return err;
}

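/*
 * Tear down per-vcpu state: stop any pending soft timer and release the
 * virtual/physical interrupt mapping established at vcpu reset.
 */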
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        timer_disarm(timer);
        if (timer->map)
                kvm_vgic_unmap_phys_irq(vcpu, timer->map);
}

void kvm_timer_enable(struct kvm *kvm)
{
        if (kvm->arch.timer.enabled)
                return;

        /*
         * There is a potential race here between VCPUs starting for the
         * first time, each of which may enable the timer; that doesn't
         * hurt, because we're just setting a variable to the same value
         * it already had.  The important thing is that all VCPUs have the
         * enabled variable set before entering the guest, if the arch
         * timers are enabled.
         */
        if (timecounter && wqueue)
                kvm->arch.timer.enabled = 1;
}

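/*
 * Called at VM creation: snapshot the physical counter as CNTVOFF so that
 * guest virtual time (CNTVCT) starts at zero.
 */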
void kvm_timer_init(struct kvm *kvm)
{
        kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}