/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_TRAP      4
#define OP_31_XOP_LWZX      23
#define OP_31_XOP_TRAP_64   68
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LD   58
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STD  62
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_LHA  42
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45

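/*
 * Re-arm the hrtimer that models the guest decrementer: convert the guest's
 * DEC value (which ticks at the timebase frequency) into nanoseconds and
 * program a relative hrtimer that fires when the guest DEC would reach zero.
 */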
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        unsigned long dec_nsec;
        unsigned long long dec_time;

        pr_debug("mtDEC: %x\n", vcpu->arch.dec);
        hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
        /* mtdec lowers the interrupt line when positive. */
        kvmppc_core_dequeue_dec(vcpu);

        /* POWER4+ triggers a dec interrupt if the value is < 0 */
        if (vcpu->arch.dec & 0x80000000) {
                kvmppc_core_queue_dec(vcpu);
                return;
        }
#endif

#ifdef CONFIG_BOOKE
        /* On BOOKE, DEC = 0 is as good as decrementer not enabled */
        if (vcpu->arch.dec == 0)
                return;
#endif

        /*
         * The decrementer ticks at the same rate as the timebase, so
         * that's how we convert the guest DEC value to the number of
         * host ticks.
         */

        dec_time = vcpu->arch.dec;
        /*
         * Guest timebase ticks at the same frequency as host decrementer.
         * So use the host decrementer calculations for decrementer emulation.
         */
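        /*
         * The clockevent mult/shift pair converts nanoseconds to decrementer
         * ticks (ticks = ns * mult >> shift), so invert that here:
         * ns = (ticks << shift) / mult.  The nanosecond total is then split
         * into whole seconds and a remainder for ktime_set() below.
         */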
        dec_time = dec_time << decrementer_clockevent.shift;
        do_div(dec_time, decrementer_clockevent.mult);
        dec_nsec = do_div(dec_time, NSEC_PER_SEC);
        hrtimer_start(&vcpu->arch.dec_timer,
                ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
        vcpu->arch.dec_jiffies = get_tb();
}

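/*
 * Return the guest's current DEC value: the value last written by the guest
 * minus the number of timebase ticks elapsed since that write.  On BookE the
 * result is clamped at zero rather than allowed to wrap negative.
 */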
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
        u64 jd = tb - vcpu->arch.dec_jiffies;

#ifdef CONFIG_BOOKE
        if (vcpu->arch.dec < jd)
                return 0;
#endif

        return vcpu->arch.dec - jd;
}

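/*
 * Emulate an mtspr that trapped.  SPRs common to all cores are handled here;
 * anything else is passed on to the core-specific kvmppc_core_emulate_mtspr().
 */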
static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
        enum emulation_result emulated = EMULATE_DONE;
        ulong spr_val = kvmppc_get_gpr(vcpu, rs);

        switch (sprn) {
        case SPRN_SRR0:
                vcpu->arch.shared->srr0 = spr_val;
                break;
        case SPRN_SRR1:
                vcpu->arch.shared->srr1 = spr_val;
                break;

        /* XXX We need to context-switch the timebase for
         * watchdog and FIT. */
        case SPRN_TBWL: break;
        case SPRN_TBWU: break;

        case SPRN_MSSSR0: break;

        case SPRN_DEC:
                vcpu->arch.dec = spr_val;
                kvmppc_emulate_dec(vcpu);
                break;

        case SPRN_SPRG0:
                vcpu->arch.shared->sprg0 = spr_val;
                break;
        case SPRN_SPRG1:
                vcpu->arch.shared->sprg1 = spr_val;
                break;
        case SPRN_SPRG2:
                vcpu->arch.shared->sprg2 = spr_val;
                break;
        case SPRN_SPRG3:
                vcpu->arch.shared->sprg3 = spr_val;
                break;

        default:
                emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
                                                     spr_val);
                if (emulated == EMULATE_FAIL)
                        printk(KERN_INFO "mtspr: unknown spr "
                                "0x%x\n", sprn);
                break;
        }

        kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);

        return emulated;
}

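/*
 * Emulate an mfspr that trapped.  As with mtspr, common SPRs are handled here
 * and the rest are deferred to kvmppc_core_emulate_mfspr(); the result is
 * only written to the target GPR if emulation succeeded.
 */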
static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
        enum emulation_result emulated = EMULATE_DONE;
        ulong spr_val = 0;

        switch (sprn) {
        case SPRN_SRR0:
                spr_val = vcpu->arch.shared->srr0;
                break;
        case SPRN_SRR1:
                spr_val = vcpu->arch.shared->srr1;
                break;
        case SPRN_PVR:
                spr_val = vcpu->arch.pvr;
                break;
        case SPRN_PIR:
                spr_val = vcpu->vcpu_id;
                break;
        case SPRN_MSSSR0:
                spr_val = 0;
                break;

        /* Note: mftb and TBRL/TBWL are user-accessible, so
         * the guest can always access the real TB anyways.
         * In fact, we probably will never see these traps. */
        case SPRN_TBWL:
                spr_val = get_tb() >> 32;
                break;
        case SPRN_TBWU:
                spr_val = get_tb();
                break;

        case SPRN_SPRG0:
                spr_val = vcpu->arch.shared->sprg0;
                break;
        case SPRN_SPRG1:
                spr_val = vcpu->arch.shared->sprg1;
                break;
        case SPRN_SPRG2:
                spr_val = vcpu->arch.shared->sprg2;
                break;
        case SPRN_SPRG3:
                spr_val = vcpu->arch.shared->sprg3;
                break;
        /* Note: SPRG4-7 are user-readable, so we don't get
         * a trap. */

        case SPRN_DEC:
                spr_val = kvmppc_get_dec(vcpu, get_tb());
                break;
        default:
                emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
                                                     &spr_val);
                if (unlikely(emulated == EMULATE_FAIL)) {
                        printk(KERN_INFO "mfspr: unknown spr "
                                "0x%x\n", sprn);
                }
                break;
        }

        if (emulated == EMULATE_DONE)
                kvmppc_set_gpr(vcpu, rt, spr_val);
        kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);

        return emulated;
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
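/*
 * Decode and emulate the instruction the guest last trapped on: trap ops,
 * MMIO-style loads and stores, and SPR accesses are handled here; anything
 * unrecognized is handed to kvmppc_core_emulate_op().  Unless emulation says
 * otherwise, the guest PC is advanced past the emulated instruction.
 */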
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = kvmppc_get_last_inst(vcpu);
        int ra = get_ra(inst);
        int rs = get_rs(inst);
        int rt = get_rt(inst);
        int sprn = get_sprn(inst);
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

        switch (get_op(inst)) {
        case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
        case OP_TRAP_64:
                kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                kvmppc_core_queue_program(vcpu,
                                          vcpu->arch.shared->esr | ESR_PTR);
#endif
                advance = 0;
                break;

        case 31:
                switch (get_xop(inst)) {

                case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
                case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
                        kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                        kvmppc_core_queue_program(vcpu,
                                        vcpu->arch.shared->esr | ESR_PTR);
#endif
                        advance = 0;
                        break;
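                /*
                 * For the load/store cases below, the trailing arguments to
                 * kvmppc_handle_load()/kvmppc_handle_store() are the access
                 * size in bytes and a flag selecting byte order: 1 for a
                 * normal big-endian access, 0 for the byte-reversed
                 * lwbrx/stwbrx/lhbrx/sthbrx forms.
                 */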
                case OP_31_XOP_LWZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;

                case OP_31_XOP_LBZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;

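                /*
                 * Update-form variants (lbzux, sthux, ...) also write the
                 * effective address of the access back into rA.
                 */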
                case OP_31_XOP_LBZUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_STWX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 1);
                        break;

                case OP_31_XOP_STBX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        break;

                case OP_31_XOP_STBUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_LHAX:
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_MFSPR:
                        emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
                        break;

                case OP_31_XOP_STHX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        break;

                case OP_31_XOP_STHUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_MTSPR:
                        emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
                        break;

                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence. */
                        break;

                case OP_31_XOP_LWBRX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;

                case OP_31_XOP_TLBSYNC:
                        break;

                case OP_31_XOP_STWBRX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 0);
                        break;

                case OP_31_XOP_LHBRX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;

                case OP_31_XOP_STHBRX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 0);
                        break;

                default:
                        /* Attempt core-specific emulation below. */
                        emulated = EMULATE_FAIL;
                }
                break;

        case OP_LWZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;

        /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
        case OP_LD:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
                break;

        case OP_LWZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LBZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;

        case OP_LBZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STW:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                break;

        /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
        case OP_STD:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               8, 1);
                break;

        case OP_STWU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STB:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                break;

        case OP_STBU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LHZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;

        case OP_LHZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LHA:
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                break;

        case OP_LHAU:
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STH:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                break;

        case OP_STHU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        default:
                emulated = EMULATE_FAIL;
        }

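        /*
         * Anything not recognized above gets one more chance via the
         * core-specific emulation hook before the guest is handed a
         * program interrupt.
         */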
        if (emulated == EMULATE_FAIL) {
                emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
                if (emulated == EMULATE_AGAIN) {
                        advance = 0;
                } else if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
                        kvmppc_core_queue_program(vcpu, 0);
                }
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}