/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

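/*
 * Per-vcpu statistics exported through debugfs; each entry maps a file
 * name to the offset of a counter in struct kvm_vcpu via VCPU_STAT above.
 */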
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

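/*
 * Facility list presented to guests: a copy of the host's STFLE facility
 * bits, masked in kvm_s390_init() down to what KVM actually supports.
 */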
unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
        return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
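        /* dirty page logging is not implemented on s390 at this point; report success */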
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

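        /*
         * One zeroed page holds the SCA (system control area); each vcpu's
         * SIE block is linked into it in kvm_arch_vcpu_create().
         */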
        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
        }

        kvm->arch.css_support = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
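                /* mcn numbers SCA entries from the MSB, hence bit 63 - id */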
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

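/*
 * Host FP and access registers are stashed on load and the guest's are
 * installed; kvm_arch_vcpu_put() reverses this. Guest register state thus
 * lives in the real registers while the vcpu is loaded (lazy save/restore).
 */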
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
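        /* architected reset values for CR0 and CR14 (cf. initial CPU reset in the PoP) */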
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
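        /*
         * ecb/ecb2/eca select SIE execution and interpretation controls;
         * the bit meanings follow the SIE block layout in kvm_host.h.
         */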
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->ecb2  = 8;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
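                /* the SIE block holds the 64-bit SCA origin split into 32-bit halves */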
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages; the prefix area spans
                 * two 4K pages, so ignoring bit 12 matches either one */
                if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Let's just retry the request loop.
         */
        while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      vcpu->arch.sie_block->prefix,
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                s390_vcpu_unblock(vcpu);
        }
        return 0;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

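        /* the SIE block keeps its own copy of guest r14/r15 (16 bytes at gg14) */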
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (exit_reason >= 0) {
                rc = 0;
        } else {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        trace_kvm_s390_sie_fault(vcpu);
                        rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when
         * running the guest), so that memslots (and other stuff) are
         * protected.
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * As PF_VCPU will be used in the fault handler, there
                 * must be no uaccess between guest_enter and guest_exit.
                 */
                preempt_disable();
                kvm_guest_enter();
                preempt_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                kvm_guest_exit();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

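        /* re-entry is only valid after an exit reason this code set itself */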
        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();
        rc = __vcpu_run(vcpu);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

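/*
 * Copy to a guest address; with 'prefix' set the destination goes through
 * prefixing, otherwise it is treated as a guest absolute address.
 */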
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

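        /* byte 163 of lowcore holds the architectural-mode id; 1 means z/Architecture */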
        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int idx;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                        ucasmap.length);
                break;
        }
#endif
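        /* resolve a fault on a guest address; gmap_fault() hands back a host address or an error value */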
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
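        /* user-controlled VMs may mmap the real SIE control block at KVM_S390_SIE_PAGE_OFFSET */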
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /*
         * A few sanity checks. Memory slots must start and end on a segment
         * boundary (1MB). The memory in userland may be fragmented into
         * various different vmas. It is okay to mmap() and munmap() stuff
         * in this slot at any time after this call.
         */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                enum kvm_mr_change change)
{
        int rc;

        /* If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!vfacilities) {
                kvm_exit();
                return -ENOMEM;
        }
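        /* start from the host's facility list, then mask out everything KVM does not virtualize */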
        memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
        vfacilities[0] &= 0xff82fff3f47c0000UL;
        vfacilities[1] &= 0x001c000000000000UL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) vfacilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");