/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;

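/*
 * Read the raw physical counter through the host timecounter.  Guest
 * virtual time is derived from this value by subtracting the per-VM
 * CNTVOFF offset (see kvm_timer_should_fire() and friends).
 */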
static cycle_t kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
        return timer->armed;
}

/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
        timer->armed = true;
        hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS);
}

static void timer_disarm(struct arch_timer_cpu *timer)
{
        if (timer_is_armed(timer)) {
                hrtimer_cancel(&timer->timer);
                cancel_work_sync(&timer->expired);
                timer->armed = false;
        }
}

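/*
 * Inject the timer interrupt into the guest: mark the mapped physical
 * interrupt active and raise the corresponding virtual interrupt
 * through the vgic at the level recorded for this timer.
 */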
static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
{
        int ret;
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        kvm_vgic_set_phys_irq_active(timer->map, true);
        ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
                                         timer->map,
                                         timer->irq->level);
        WARN_ON(ret);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

        /*
         * We disable the timer in the world switch and let it be
         * handled by kvm_timer_sync_hwstate(). Getting a timer
         * interrupt at this point is a sure sign of some major
         * breakage.
         */
        pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
        return IRQ_HANDLED;
}

/*
 * Work function for handling the backup timer that we schedule when a vcpu is
 * no longer running, but had a timer programmed to fire in the future.
 */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
        vcpu->arch.timer_cpu.armed = false;

        /*
         * If the vcpu is blocked we want to wake it up so that it will see
         * the timer has expired when entering the guest.
         */
        kvm_vcpu_kick(vcpu);
}

static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;

        timer = container_of(hrt, struct arch_timer_cpu, timer);
        queue_work(wqueue, &timer->expired);
        return HRTIMER_NORESTART;
}

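/*
 * Decide whether the virtual timer output should be asserted: the
 * timer must be enabled and unmasked in CNTV_CTL, the interrupt must
 * not already be active, and the compare value must have been reached.
 * The guest's virtual count is the physical count minus the per-VM
 * offset: CNTVCT = CNTPCT - CNTVOFF.
 */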
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        cycle_t cval, now;

        if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
            !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) ||
            kvm_vgic_get_phys_irq_active(timer->map))
                return false;

        cval = timer->cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

        return cval <= now;
}

/**
 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
 * @vcpu: The vcpu pointer
 *
 * Disarm any pending soft timers, since the world-switch code will write the
 * virtual timer state back to the physical CPU.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        /*
         * We're about to run this vcpu again, so there is no need to
         * keep the background timer running: the world switch will
         * populate the CPU timer again shortly.
         */
        timer_disarm(timer);

        /*
         * If the timer expired while we were not scheduled, now is the time
         * to inject it.
         */
        if (kvm_timer_should_fire(vcpu))
                kvm_timer_inject_irq(vcpu);
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check whether the virtual timer has expired while the guest was running:
 * inject the interrupt directly if so, otherwise schedule a soft timer for
 * the remaining time.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        cycle_t cval, now;
        u64 ns;

        BUG_ON(timer_is_armed(timer));

        if (kvm_timer_should_fire(vcpu)) {
                /*
                 * Timer has already expired while we were not
                 * looking. Inject the interrupt and carry on.
                 */
                kvm_timer_inject_irq(vcpu);
                return;
        }

        cval = timer->cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

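        /*
         * Convert the remaining cycles until the compare value is
         * reached into nanoseconds, and arm a soft timer so the vcpu
         * can be kicked once the deadline passes.
         */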
        ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
                                 &timecounter->frac);
        timer_arm(timer, ns);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                         const struct kvm_irq_level *irq)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct irq_phys_map *map;

        /*
         * The vcpu timer irq number cannot be determined in
         * kvm_timer_vcpu_init() because it is called long before
         * kvm_vcpu_set_target(). To handle this, we determine the
         * vcpu timer irq number when the vcpu is reset.
         */
        timer->irq = irq;

        /*
         * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
         * and to 0 for ARMv7.  We provide an implementation that always
         * resets the timer to disabled and unmasked, which is compliant with
         * the ARMv7 architecture.
         */
        timer->cntv_ctl = 0;

        /*
         * Tell the VGIC that the virtual interrupt is tied to a
         * physical interrupt. We do that once per VCPU.
         */
        map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq);
        if (WARN_ON(IS_ERR(map)))
                return PTR_ERR(map);

        timer->map = map;
        return 0;
}

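/*
 * Set up the per-vcpu pieces of the backup timer path: the work item
 * that kicks the vcpu and the hrtimer that queues it on expiry.
 */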
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
        hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->timer.function = kvm_timer_expire;
}

static void kvm_timer_init_interrupt(void *info)
{
        enable_percpu_irq(host_vtimer_irq, 0);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                timer->cntv_ctl = value;
                break;
        case KVM_REG_ARM_TIMER_CNT:
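                /*
                 * Userspace sets the guest's virtual count indirectly:
                 * store the offset such that CNTVCT reads back as the
                 * requested value, since CNTVCT = CNTPCT - CNTVOFF.
                 */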
                vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                timer->cntv_cval = value;
                break;
        default:
                return -1;
        }
        return 0;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return timer->cntv_ctl;
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
                return timer->cntv_cval;
        }
        return (u64)-1;
}

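/*
 * Keep the per-cpu virtual timer interrupt usable across CPU hotplug:
 * enable it as a CPU comes up and disable it again as the CPU dies.
 */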
static int kvm_timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *cpu)
{
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
                kvm_timer_init_interrupt(NULL);
                break;
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                disable_percpu_irq(host_vtimer_irq);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block kvm_timer_cpu_nb = {
        .notifier_call = kvm_timer_cpu_notify,
};

static const struct of_device_id arch_timer_of_match[] = {
        { .compatible   = "arm,armv7-timer",    },
        { .compatible   = "arm,armv8-timer",    },
        {},
};

int kvm_timer_hyp_init(void)
{
        struct device_node *np;
        unsigned int ppi;
        int err;

        timecounter = arch_timer_get_timecounter();
        if (!timecounter)
                return -ENODEV;

        np = of_find_matching_node(NULL, arch_timer_of_match);
        if (!np) {
                kvm_err("kvm_arch_timer: can't find DT node\n");
                return -ENODEV;
        }

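        /*
         * Per the arch timer DT binding, interrupt index 2 is the
         * virtual timer PPI (the order is: secure physical,
         * non-secure physical, virtual, hypervisor).
         */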
        ppi = irq_of_parse_and_map(np, 2);
        if (!ppi) {
                kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
                err = -EINVAL;
                goto out;
        }

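        /*
         * The percpu dev_id is the per-cpu "running vcpu" pointer, so
         * the handler can report which vcpu, if any, was loaded when
         * an unexpected vtimer interrupt fires on the host.
         */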
        err = request_percpu_irq(ppi, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
                kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
                        ppi, err);
                goto out;
        }

        host_vtimer_irq = ppi;

        err = __register_cpu_notifier(&kvm_timer_cpu_nb);
        if (err) {
                kvm_err("Cannot register timer CPU notifier\n");
                goto out_free;
        }

        wqueue = create_singlethread_workqueue("kvm_arch_timer");
        if (!wqueue) {
                err = -ENOMEM;
                goto out_free;
        }

        kvm_info("%s IRQ%d\n", np->name, ppi);
        on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

        goto out;
out_free:
        free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
        of_node_put(np);
        return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        timer_disarm(timer);
        if (timer->map)
                kvm_vgic_unmap_phys_irq(vcpu, timer->map);
}

void kvm_timer_enable(struct kvm *kvm)
{
        if (kvm->arch.timer.enabled)
                return;

        /*
         * There is a potential race here between VCPUs starting for the first
         * time, which may enable the timer multiple times.  That doesn't
         * hurt though, because we're just setting a variable to the same
         * value it already had.  The important thing is that all VCPUs have
         * the enabled variable set, before entering the guest, if the arch
         * timers are enabled.
         */
        if (timecounter && wqueue)
                kvm->arch.timer.enabled = 1;
}

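/*
 * Record the current physical count as the per-VM offset, so that a
 * new guest's virtual counter (CNTVCT = CNTPCT - CNTVOFF) starts
 * ticking from zero.
 */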
void kvm_timer_init(struct kvm *kvm)
{
        kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}