/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter, reg, enabled, running;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	counter = vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is equal to the value of the counter register
	 * plus the value the perf event has counted so far.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter & pmc->bitmask;
}
/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
}
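
/*
 * Note on the update above (illustrative, not from the original source): the
 * shadow register only holds a base value; the live perf count is added on
 * every read. Writing a new value therefore stores the signed difference, so
 * the next read returns exactly @val. For example, if the guest currently
 * observes 100 (register 70 + perf delta 30) and writes 40, the register is
 * adjusted by 40 - 100 = -60 down to 10, and a subsequent read yields
 * 10 + 30 = 40 as expected.
 */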
/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg;

	if (pmc->perf_event) {
		counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		       ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		vcpu_sys_reg(vcpu, reg) = counter;
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}
/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
		pmu->pmc[i].idx = i;
		pmu->pmc[i].bitmask = 0xffffffffUL;
	}
}
/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = &pmu->pmc[i];

		if (pmc->perf_event) {
			perf_event_disable(pmc->perf_event);
			perf_event_release_kernel(pmc->perf_event);
			pmc->perf_event = NULL;
		}
	}
}
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
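
/*
 * Illustrative example (not part of the original source): the value extracted
 * above is PMCR_EL0.N, the number of implemented event counters. With N == 4
 * the valid mask is GENMASK(3, 0) | BIT(31) == 0x8000000f, i.e. event
 * counters 0-3 plus the cycle counter at ARMV8_PMU_CYCLE_IDX. With N == 0
 * only the cycle counter bit remains.
 */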
/**
 * kvm_pmu_enable_counter - enable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}
/**
 * kvm_pmu_disable_counter - disable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}
static void kvm_pmu_check_overflow(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow = !!kvm_pmu_overflow_status(vcpu);

	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow,
					      &vcpu->arch.pmu);
		WARN_ON(ret);
	}
}
/**
 * kvm_pmu_overflow_set - set PMU overflow interrupt
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMOVSSET register
 */
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
{
	if (val == 0)
		return;

	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
	kvm_pmu_check_overflow(vcpu);
}
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	kvm_pmu_check_overflow(vcpu);
}
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}
/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU overflow bit for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}
/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}
/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}
static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	/* Rewind to pmc[0] so the container_of() chain below is valid. */
	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}
/**
 * When perf event overflows, call kvm_pmu_overflow_set to set overflow status.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;

	kvm_pmu_overflow_set(vcpu, BIT(idx));
}
/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

	enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
		    && (enable & BIT(i))) {
			reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			/* The counter overflowed if it wrapped to zero. */
			if (!reg)
				kvm_pmu_overflow_set(vcpu, BIT(i));
		}
	}
}
/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u64 mask;
	int i;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter(vcpu,
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}

	if (val & ARMV8_PMU_PMCR_LC) {
		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
		pmc->bitmask = 0xffffffffffffffffUL;
	}
}
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}
/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to
 * count an event with the given hardware event number. Here we call the
 * perf_event API to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter;

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
	    select_idx != ARMV8_PMU_CYCLE_IDX)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
	/* The initial sample period (overflow count) of an event. */
	attr.sample_period = (-counter) & pmc->bitmask;

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}
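
/*
 * Illustrative note (not part of the original source): sample_period is the
 * number of events the perf counter may count before the overflow callback
 * fires. With a 32-bit counter (bitmask 0xffffffff) currently reading
 * 0xffffff00, sample_period = (-0xffffff00) & 0xffffffff = 0x100, so
 * kvm_pmu_perf_overflow() runs after 256 more events, which is exactly when
 * the guest-visible counter would wrap.
 */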
bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This ensures the presence of a
	 * physical PMU and that CONFIG_PERF_EVENTS is selected.
	 */
	return (perf_num_counters() > 0);
}
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return 0;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -EINVAL;

		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}
static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
		return -ENXIO;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	vcpu->arch.pmu.created = true;
	return 0;
}
/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}
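
/*
 * Illustrative userspace sketch (not part of this file): with an in-kernel
 * irqchip, the overflow IRQ must be set before KVM_ARM_VCPU_PMU_V3_INIT, and
 * both are issued on the vcpu fd through the KVM_ARM_VCPU_PMU_V3_CTRL
 * attribute group, roughly as follows:
 *
 *	int irq = 23;					// e.g. a PPI intid
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr	= (__u64)(unsigned long)&irq,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// set the overflow IRQ
 *	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
 *	attr.addr = 0;
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// then initialize the PMU
 */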
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}