4 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * Zhi Wang <zhi.a.wang@intel.com>
27 * Zhenyu Wang <zhenyuw@linux.intel.com>
28 * Xiao Zheng <xiao.zheng@intel.com>
31 * Min He <min.he@intel.com>
32 * Bing Niu <bing.niu@intel.com>
38 #include "i915_pvinfo.h"
41 static bool enable_out_of_sync = false;
42 static int preallocated_oos_pages = 8192;
45 * validate a gm address and related range size,
46 * translate it to host gm address
48 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
50 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
51 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
52 gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
53 vgpu->id, addr, size);
59 /* translate a guest gmadr to host gmadr */
60 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
62 if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
63 "invalid guest gmadr %llx\n", g_addr))
66 if (vgpu_gmadr_is_aperture(vgpu, g_addr))
67 *h_addr = vgpu_aperture_gmadr_base(vgpu)
68 + (g_addr - vgpu_aperture_offset(vgpu));
70 *h_addr = vgpu_hidden_gmadr_base(vgpu)
71 + (g_addr - vgpu_hidden_offset(vgpu));
75 /* translate a host gmadr to guest gmadr */
76 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
78 if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
79 "invalid host gmadr %llx\n", h_addr))
82 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
83 *g_addr = vgpu_aperture_gmadr_base(vgpu)
84 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
86 *g_addr = vgpu_hidden_gmadr_base(vgpu)
87 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
91 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
92 unsigned long *h_index)
97 ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
102 *h_index = h_addr >> GTT_PAGE_SHIFT;
106 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
107 unsigned long *g_index)
112 ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
117 *g_index = g_addr >> GTT_PAGE_SHIFT;
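/*
 * Editorial sketch, not part of the original source: the index helpers above
 * simply shift by GTT_PAGE_SHIFT and reuse the gmadr g2h/h2g translation, so
 * validity checking is inherited from vgpu_gmadr_is_valid(). A hypothetical
 * caller holding a guest GGTT entry index could do:
 *
 *	unsigned long h_index;
 *
 *	if (intel_gvt_ggtt_index_g2h(vgpu, g_index, &h_index))
 *		return -EINVAL;
 *
 * where a non-zero return means the guest index fell outside the vGPU's
 * assigned aperture/hidden ranges.
 */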
121 #define gtt_type_is_entry(type) \
122 (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
123 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
124 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
126 #define gtt_type_is_pt(type) \
127 (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
129 #define gtt_type_is_pte_pt(type) \
130 (type == GTT_TYPE_PPGTT_PTE_PT)
132 #define gtt_type_is_root_pointer(type) \
133 (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
135 #define gtt_init_entry(e, t, p, v) do { \
138 memcpy(&(e)->val64, &v, sizeof(v)); \
142 * Mappings between GTT_TYPE* enumerations.
143 * Following information can be found according to the given type:
144 * - type of next level page table
145 * - type of entry inside this level page table
146 * - type of entry with PSE set
 148 * If the given type doesn't carry a particular kind of information, the
 149 * lookup returns GTT_TYPE_INVALID. For example, asking an L4 root entry
 150 * for its PSE type, or a PTE page table for its next-level page table
 151 * type, yields GTT_TYPE_INVALID, because an L4 root entry has no PSE bit
 152 * and a PTE page table has no next level.
 153 * This is useful when traversing a page table.
157 struct gtt_type_table_entry {
163 #define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
165 .entry_type = e_type, \
166 .next_pt_type = npt_type, \
167 .pse_entry_type = pse_type, \
170 static struct gtt_type_table_entry gtt_type_table[] = {
171 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
172 GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
173 GTT_TYPE_PPGTT_PML4_PT,
175 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
176 GTT_TYPE_PPGTT_PML4_ENTRY,
177 GTT_TYPE_PPGTT_PDP_PT,
179 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
180 GTT_TYPE_PPGTT_PML4_ENTRY,
181 GTT_TYPE_PPGTT_PDP_PT,
183 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
184 GTT_TYPE_PPGTT_PDP_ENTRY,
185 GTT_TYPE_PPGTT_PDE_PT,
186 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
187 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
188 GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
189 GTT_TYPE_PPGTT_PDE_PT,
190 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
191 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
192 GTT_TYPE_PPGTT_PDP_ENTRY,
193 GTT_TYPE_PPGTT_PDE_PT,
194 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
195 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
196 GTT_TYPE_PPGTT_PDE_ENTRY,
197 GTT_TYPE_PPGTT_PTE_PT,
198 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
199 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
200 GTT_TYPE_PPGTT_PDE_ENTRY,
201 GTT_TYPE_PPGTT_PTE_PT,
202 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
203 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
204 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
207 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
208 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
211 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
212 GTT_TYPE_PPGTT_PDE_ENTRY,
214 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
215 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
216 GTT_TYPE_PPGTT_PDP_ENTRY,
218 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
219 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
225 static inline int get_next_pt_type(int type)
227 return gtt_type_table[type].next_pt_type;
230 static inline int get_entry_type(int type)
232 return gtt_type_table[type].entry_type;
235 static inline int get_pse_type(int type)
237 return gtt_type_table[type].pse_entry_type;
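/*
 * Illustrative lookups against gtt_type_table above (editorial sketch):
 *
 *	get_next_pt_type(GTT_TYPE_PPGTT_PML4_ENTRY) == GTT_TYPE_PPGTT_PDP_PT
 *	get_entry_type(GTT_TYPE_PPGTT_PDP_PT)       == GTT_TYPE_PPGTT_PDP_ENTRY
 *	get_pse_type(GTT_TYPE_PPGTT_PDP_ENTRY)      == GTT_TYPE_PPGTT_PTE_1G_ENTRY
 *
 * Types that have no next level or no PSE form resolve to GTT_TYPE_INVALID,
 * which callers use to stop a walk.
 */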
240 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
242 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
248 pte = ioread32(addr);
249 pte |= (u64)ioread32(addr + 4) << 32;
254 static void write_pte64(struct drm_i915_private *dev_priv,
255 unsigned long index, u64 pte)
257 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
262 iowrite32((u32)pte, addr);
263 iowrite32(pte >> 32, addr + 4);
265 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
266 POSTING_READ(GFX_FLSH_CNTL_GEN6);
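/*
 * Editorial note: a GGTT entry in the GSM is 64 bits wide, but it is accessed
 * here as two 32-bit MMIO operations (low dword first, high dword at +4);
 * writes are then pushed out by poking GFX_FLSH_CNTL_GEN6 and posting-reading
 * it back.
 */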
269 static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
270 struct intel_gvt_gtt_entry *e,
271 unsigned long index, bool hypervisor_access, unsigned long gpa,
272 struct intel_vgpu *vgpu)
274 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
277 if (WARN_ON(info->gtt_entry_size != 8))
280 if (hypervisor_access) {
281 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
282 (index << info->gtt_entry_size_shift),
286 e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
288 e->val64 = *((u64 *)pt + index);
293 static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
294 struct intel_gvt_gtt_entry *e,
295 unsigned long index, bool hypervisor_access, unsigned long gpa,
296 struct intel_vgpu *vgpu)
298 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
301 if (WARN_ON(info->gtt_entry_size != 8))
304 if (hypervisor_access) {
305 ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
306 (index << info->gtt_entry_size_shift),
310 write_pte64(vgpu->gvt->dev_priv, index, e->val64);
312 *((u64 *)pt + index) = e->val64;
319 #define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
320 #define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
321 #define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
323 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
327 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
328 pfn = (e->val64 & ADDR_1G_MASK) >> 12;
329 else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
330 pfn = (e->val64 & ADDR_2M_MASK) >> 12;
332 pfn = (e->val64 & ADDR_4K_MASK) >> 12;
336 static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
338 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
339 e->val64 &= ~ADDR_1G_MASK;
340 pfn &= (ADDR_1G_MASK >> 12);
341 } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
342 e->val64 &= ~ADDR_2M_MASK;
343 pfn &= (ADDR_2M_MASK >> 12);
345 e->val64 &= ~ADDR_4K_MASK;
346 pfn &= (ADDR_4K_MASK >> 12);
349 e->val64 |= (pfn << 12);
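/*
 * Worked example (editorial): for a 2MB entry, ADDR_2M_MASK keeps the address
 * bits from bit 21 up to the hardware address width GTT_HAW, so
 * gen8_gtt_get_pfn() still returns a 4KB-granular pfn ((val64 & mask) >> 12),
 * and gen8_gtt_set_pfn() masks the incoming pfn with (ADDR_2M_MASK >> 12)
 * before or-ing it back in at bit 12. 1GB and 4KB entries follow the same
 * pattern with their own masks.
 */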
352 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
354 /* Entry doesn't have PSE bit. */
355 if (get_pse_type(e->type) == GTT_TYPE_INVALID)
358 e->type = get_entry_type(e->type);
359 if (!(e->val64 & (1 << 7)))
362 e->type = get_pse_type(e->type);
366 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
 369 * i915 writes the PDP root pointer registers without the present bit set
 370 * and it still works, so we need to treat root pointer entries specially.
373 if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
374 || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
375 return (e->val64 != 0);
377 return (e->val64 & (1 << 0));
380 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
382 e->val64 &= ~(1 << 0);
386 * Per-platform GMA routines.
388 static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
390 unsigned long x = (gma >> GTT_PAGE_SHIFT);
392 trace_gma_index(__func__, gma, x);
396 #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
397 static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
399 unsigned long x = (exp); \
400 trace_gma_index(__func__, gma, x); \
404 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
405 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
406 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
407 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
408 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
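/*
 * Editorial sketch of how a gen8 graphics memory address decomposes with the
 * helpers generated above:
 *
 *	pml4 index   = gma >> 39 & 0x1ff
 *	l4 pdp index = gma >> 30 & 0x1ff
 *	pde index    = gma >> 21 & 0x1ff
 *	pte index    = gma >> 12 & 0x1ff
 *	page offset  = gma & (GTT_PAGE_SIZE - 1)
 *
 * A 3-level PPGTT instead uses gma >> 30 & 0x3 to select one of its four PDP
 * root pointers.
 */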
410 static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
411 .get_entry = gtt_get_entry64,
412 .set_entry = gtt_set_entry64,
413 .clear_present = gtt_entry_clear_present,
414 .test_present = gen8_gtt_test_present,
415 .test_pse = gen8_gtt_test_pse,
416 .get_pfn = gen8_gtt_get_pfn,
417 .set_pfn = gen8_gtt_set_pfn,
420 static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
421 .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
422 .gma_to_pte_index = gen8_gma_to_pte_index,
423 .gma_to_pde_index = gen8_gma_to_pde_index,
424 .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
425 .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
426 .gma_to_pml4_index = gen8_gma_to_pml4_index,
429 static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
430 struct intel_gvt_gtt_entry *m)
432 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
433 unsigned long gfn, mfn;
437 if (!ops->test_present(p))
440 gfn = ops->get_pfn(p);
442 mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
443 if (mfn == INTEL_GVT_INVALID_ADDR) {
444 gvt_err("fail to translate gfn: 0x%lx\n", gfn);
448 ops->set_pfn(m, mfn);
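/*
 * Hedged summary: gtt_entry_p2m() returns early for a non-present guest
 * entry; otherwise it asks the hypervisor to translate the guest pfn (gfn)
 * into a machine pfn (mfn) and writes that mfn into the host-visible entry
 * m. Only the pfn field of m is touched here; the other flag bits are
 * expected to come from the guest entry.
 */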
455 struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
456 void *page_table, struct intel_gvt_gtt_entry *e,
459 struct intel_gvt *gvt = mm->vgpu->gvt;
460 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
462 e->type = mm->page_table_entry_type;
464 ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
469 struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
470 void *page_table, struct intel_gvt_gtt_entry *e,
473 struct intel_gvt *gvt = mm->vgpu->gvt;
474 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
476 return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
480 * PPGTT shadow page table helpers.
482 static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
483 struct intel_vgpu_ppgtt_spt *spt,
484 void *page_table, int type,
485 struct intel_gvt_gtt_entry *e, unsigned long index,
488 struct intel_gvt *gvt = spt->vgpu->gvt;
489 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
491 e->type = get_entry_type(type);
493 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
496 ops->get_entry(page_table, e, index, guest,
497 spt->guest_page.gfn << GTT_PAGE_SHIFT,
503 static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
504 struct intel_vgpu_ppgtt_spt *spt,
505 void *page_table, int type,
506 struct intel_gvt_gtt_entry *e, unsigned long index,
509 struct intel_gvt *gvt = spt->vgpu->gvt;
510 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
512 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
515 return ops->set_entry(page_table, e, index, guest,
516 spt->guest_page.gfn << GTT_PAGE_SHIFT,
520 #define ppgtt_get_guest_entry(spt, e, index) \
521 ppgtt_spt_get_entry(spt, NULL, \
522 spt->guest_page_type, e, index, true)
524 #define ppgtt_set_guest_entry(spt, e, index) \
525 ppgtt_spt_set_entry(spt, NULL, \
526 spt->guest_page_type, e, index, true)
528 #define ppgtt_get_shadow_entry(spt, e, index) \
529 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
530 spt->shadow_page.type, e, index, false)
532 #define ppgtt_set_shadow_entry(spt, e, index) \
533 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
534 spt->shadow_page.type, e, index, false)
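/*
 * Editorial note: the four macros above select between the two views of a
 * shadow page table page. ppgtt_{get,set}_guest_entry() go through the
 * hypervisor to the guest page at guest_page.gfn, while
 * ppgtt_{get,set}_shadow_entry() operate directly on the host copy at
 * shadow_page.vaddr. Both end up in gtt_get_entry64()/gtt_set_entry64()
 * through the pte_ops table.
 */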
537 * intel_vgpu_init_guest_page - init a guest page data structure
539 * @p: a guest page data structure
540 * @gfn: guest memory page frame number
541 * @handler: function will be called when target guest memory page has
 544 * This function is called when a user wants to track a guest memory page.
547 * Zero on success, negative error code if failed.
549 int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
550 struct intel_vgpu_guest_page *p,
552 int (*handler)(void *, u64, void *, int),
555 INIT_HLIST_NODE(&p->node);
557 p->writeprotection = false;
559 p->handler = handler;
564 hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
568 static int detach_oos_page(struct intel_vgpu *vgpu,
569 struct intel_vgpu_oos_page *oos_page);
572 * intel_vgpu_clean_guest_page - release the resource owned by guest page data
575 * @p: a tracked guest page
 577 * This function is called when a user stops tracking a guest memory page.
580 void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
581 struct intel_vgpu_guest_page *p)
583 if (!hlist_unhashed(&p->node))
587 detach_oos_page(vgpu, p->oos_page);
589 if (p->writeprotection)
590 intel_gvt_hypervisor_unset_wp_page(vgpu, p);
594 * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
596 * @gfn: guest memory page frame number
598 * This function is called when emulation logic wants to know if a trapped GFN
599 * is a tracked guest page.
602 * Pointer to guest page data structure, NULL if failed.
604 struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
605 struct intel_vgpu *vgpu, unsigned long gfn)
607 struct intel_vgpu_guest_page *p;
609 hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
617 static inline int init_shadow_page(struct intel_vgpu *vgpu,
618 struct intel_vgpu_shadow_page *p, int type)
620 p->vaddr = page_address(p->page);
623 INIT_HLIST_NODE(&p->node);
625 p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
626 if (p->mfn == INTEL_GVT_INVALID_ADDR)
629 hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
633 static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
635 if (!hlist_unhashed(&p->node))
639 static inline struct intel_vgpu_shadow_page *find_shadow_page(
640 struct intel_vgpu *vgpu, unsigned long mfn)
642 struct intel_vgpu_shadow_page *p;
644 hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
652 #define guest_page_to_ppgtt_spt(ptr) \
653 container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
655 #define shadow_page_to_ppgtt_spt(ptr) \
656 container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
658 static void *alloc_spt(gfp_t gfp_mask)
660 struct intel_vgpu_ppgtt_spt *spt;
662 spt = kzalloc(sizeof(*spt), gfp_mask);
666 spt->shadow_page.page = alloc_page(gfp_mask);
667 if (!spt->shadow_page.page) {
674 static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
676 __free_page(spt->shadow_page.page);
680 static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
682 trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
684 clean_shadow_page(&spt->shadow_page);
685 intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
686 list_del_init(&spt->post_shadow_list);
691 static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
693 struct hlist_node *n;
694 struct intel_vgpu_shadow_page *sp;
697 hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
698 ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
701 static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
702 u64 pa, void *p_data, int bytes);
704 static int ppgtt_write_protection_handler(void *gp, u64 pa,
705 void *p_data, int bytes)
707 struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
710 if (bytes != 4 && bytes != 8)
713 if (!gpt->writeprotection)
716 ret = ppgtt_handle_guest_write_page_table_bytes(gp,
723 static int reclaim_one_mm(struct intel_gvt *gvt);
725 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
726 struct intel_vgpu *vgpu, int type, unsigned long gfn)
728 struct intel_vgpu_ppgtt_spt *spt = NULL;
732 spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
734 if (reclaim_one_mm(vgpu->gvt))
737 gvt_err("fail to allocate ppgtt shadow page\n");
738 return ERR_PTR(-ENOMEM);
742 spt->guest_page_type = type;
743 atomic_set(&spt->refcount, 1);
744 INIT_LIST_HEAD(&spt->post_shadow_list);
 747 * TODO: the guest page type may differ from the shadow page type
 748 * once PSE pages are supported in the future.
750 ret = init_shadow_page(vgpu, &spt->shadow_page, type);
752 gvt_err("fail to initialize shadow page for spt\n");
756 ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
757 gfn, ppgtt_write_protection_handler, NULL);
759 gvt_err("fail to initialize guest page for spt\n");
763 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
766 ppgtt_free_shadow_page(spt);
770 static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
771 struct intel_vgpu *vgpu, unsigned long mfn)
773 struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
776 return shadow_page_to_ppgtt_spt(p);
778 gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
783 #define pt_entry_size_shift(spt) \
784 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
786 #define pt_entries(spt) \
787 (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
789 #define for_each_present_guest_entry(spt, e, i) \
790 for (i = 0; i < pt_entries(spt); i++) \
791 if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
792 ppgtt_get_guest_entry(spt, e, i)))
794 #define for_each_present_shadow_entry(spt, e, i) \
795 for (i = 0; i < pt_entries(spt); i++) \
796 if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
797 ppgtt_get_shadow_entry(spt, e, i)))
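/*
 * Editorial sketch: the for_each_present_*_entry() iterators visit every slot
 * of a page table page (pt_entries(spt) = GTT_PAGE_SIZE >> entry size shift)
 * and filter on the present bit, so a caller only sees live entries:
 *
 *	for_each_present_shadow_entry(spt, &e, i)
 *		handle_entry(&e, i);	(hypothetical callback)
 */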
799 static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
801 int v = atomic_read(&spt->refcount);
803 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
805 atomic_inc(&spt->refcount);
808 static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
810 static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
811 struct intel_gvt_gtt_entry *e)
813 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
814 struct intel_vgpu_ppgtt_spt *s;
815 intel_gvt_gtt_type_t cur_pt_type;
817 if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
820 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
821 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
822 cur_pt_type = get_next_pt_type(e->type) + 1;
823 if (ops->get_pfn(e) ==
824 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
827 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
829 gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
830 vgpu->id, ops->get_pfn(e));
833 return ppgtt_invalidate_shadow_page(s);
836 static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
838 struct intel_gvt_gtt_entry e;
841 int v = atomic_read(&spt->refcount);
843 trace_spt_change(spt->vgpu->id, "die", spt,
844 spt->guest_page.gfn, spt->shadow_page.type);
846 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
848 if (atomic_dec_return(&spt->refcount) > 0)
851 if (gtt_type_is_pte_pt(spt->shadow_page.type))
854 for_each_present_shadow_entry(spt, &e, index) {
855 if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
856 gvt_err("GVT doesn't support pse bit for now\n");
859 ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
865 trace_spt_change(spt->vgpu->id, "release", spt,
866 spt->guest_page.gfn, spt->shadow_page.type);
867 ppgtt_free_shadow_page(spt);
870 gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
871 spt->vgpu->id, spt, e.val64, e.type);
875 static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
877 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
878 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
880 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
881 struct intel_vgpu_ppgtt_spt *s = NULL;
882 struct intel_vgpu_guest_page *g;
885 if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
890 g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
892 s = guest_page_to_ppgtt_spt(g);
893 ppgtt_get_shadow_page(s);
895 int type = get_next_pt_type(we->type);
897 s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
903 ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
907 ret = ppgtt_populate_shadow_page(s);
911 trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
912 s->shadow_page.type);
916 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
917 vgpu->id, s, we->val64, we->type);
921 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
922 struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
924 struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
927 se->val64 = ge->val64;
929 ops->set_pfn(se, s->shadow_page.mfn);
932 static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
934 struct intel_vgpu *vgpu = spt->vgpu;
935 struct intel_vgpu_ppgtt_spt *s;
936 struct intel_gvt_gtt_entry se, ge;
940 trace_spt_change(spt->vgpu->id, "born", spt,
941 spt->guest_page.gfn, spt->shadow_page.type);
943 if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
944 for_each_present_guest_entry(spt, &ge, i) {
945 ret = gtt_entry_p2m(vgpu, &ge, &se);
948 ppgtt_set_shadow_entry(spt, &se, i);
953 for_each_present_guest_entry(spt, &ge, i) {
954 if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
955 gvt_err("GVT doesn't support pse bit now\n");
960 s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
965 ppgtt_get_shadow_entry(spt, &se, i);
966 ppgtt_generate_shadow_entry(&se, s, &ge);
967 ppgtt_set_shadow_entry(spt, &se, i);
971 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
972 vgpu->id, spt, ge.val64, ge.type);
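/*
 * Hedged walk-through: ppgtt_populate_shadow_page() builds one level of the
 * shadow tree. For a leaf (PTE) page it translates each present guest entry
 * with gtt_entry_p2m() and writes the result into the shadow page; for a
 * non-leaf page it recurses through
 * ppgtt_populate_shadow_page_by_guest_entry() and then makes the shadow
 * entry point at the child's shadow_page.mfn via
 * ppgtt_generate_shadow_entry().
 */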
976 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
979 struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
980 struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
981 struct intel_vgpu *vgpu = spt->vgpu;
982 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
983 struct intel_gvt_gtt_entry e;
986 ppgtt_get_shadow_entry(spt, &e, index);
988 trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
991 if (!ops->test_present(&e))
994 if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
997 if (gtt_type_is_pt(get_next_pt_type(e.type))) {
998 struct intel_vgpu_ppgtt_spt *s =
999 ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
 1001 gvt_err("fail to find ppgtt shadow page\n");
1005 ret = ppgtt_invalidate_shadow_page(s);
1009 ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
1010 ppgtt_set_shadow_entry(spt, &e, index);
1013 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
1014 vgpu->id, spt, e.val64, e.type);
1018 static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
1019 struct intel_gvt_gtt_entry *we, unsigned long index)
1021 struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
1022 struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
1023 struct intel_vgpu *vgpu = spt->vgpu;
1024 struct intel_gvt_gtt_entry m;
1025 struct intel_vgpu_ppgtt_spt *s;
1028 trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
1031 if (gtt_type_is_pt(get_next_pt_type(we->type))) {
1032 s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
1037 ppgtt_get_shadow_entry(spt, &m, index);
1038 ppgtt_generate_shadow_entry(&m, s, we);
1039 ppgtt_set_shadow_entry(spt, &m, index);
1041 ret = gtt_entry_p2m(vgpu, we, &m);
1044 ppgtt_set_shadow_entry(spt, &m, index);
1048 gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
1049 spt, we->val64, we->type);
1053 static int sync_oos_page(struct intel_vgpu *vgpu,
1054 struct intel_vgpu_oos_page *oos_page)
1056 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1057 struct intel_gvt *gvt = vgpu->gvt;
1058 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1059 struct intel_vgpu_ppgtt_spt *spt =
1060 guest_page_to_ppgtt_spt(oos_page->guest_page);
1061 struct intel_gvt_gtt_entry old, new, m;
1065 trace_oos_change(vgpu->id, "sync", oos_page->id,
1066 oos_page->guest_page, spt->guest_page_type);
1068 old.type = new.type = get_entry_type(spt->guest_page_type);
1069 old.val64 = new.val64 = 0;
1071 for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
1073 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1074 ops->get_entry(NULL, &new, index, true,
1075 oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
1077 if (old.val64 == new.val64
1078 && !test_and_clear_bit(index, spt->post_shadow_bitmap))
1081 trace_oos_sync(vgpu->id, oos_page->id,
1082 oos_page->guest_page, spt->guest_page_type,
1085 ret = gtt_entry_p2m(vgpu, &new, &m);
1089 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
1090 ppgtt_set_shadow_entry(spt, &m, index);
1093 oos_page->guest_page->write_cnt = 0;
1094 list_del_init(&spt->post_shadow_list);
1098 static int detach_oos_page(struct intel_vgpu *vgpu,
1099 struct intel_vgpu_oos_page *oos_page)
1101 struct intel_gvt *gvt = vgpu->gvt;
1102 struct intel_vgpu_ppgtt_spt *spt =
1103 guest_page_to_ppgtt_spt(oos_page->guest_page);
1105 trace_oos_change(vgpu->id, "detach", oos_page->id,
1106 oos_page->guest_page, spt->guest_page_type);
1108 oos_page->guest_page->write_cnt = 0;
1109 oos_page->guest_page->oos_page = NULL;
1110 oos_page->guest_page = NULL;
1112 list_del_init(&oos_page->vm_list);
1113 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
1118 static int attach_oos_page(struct intel_vgpu *vgpu,
1119 struct intel_vgpu_oos_page *oos_page,
1120 struct intel_vgpu_guest_page *gpt)
1122 struct intel_gvt *gvt = vgpu->gvt;
1125 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
1126 oos_page->mem, GTT_PAGE_SIZE);
1130 oos_page->guest_page = gpt;
1131 gpt->oos_page = oos_page;
1133 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
1135 trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
1136 gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
1140 static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
1141 struct intel_vgpu_guest_page *gpt)
1145 ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
1149 trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
1150 gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
1152 list_del_init(&gpt->oos_page->vm_list);
1153 return sync_oos_page(vgpu, gpt->oos_page);
1156 static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
1157 struct intel_vgpu_guest_page *gpt)
1159 struct intel_gvt *gvt = vgpu->gvt;
1160 struct intel_gvt_gtt *gtt = &gvt->gtt;
1161 struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
 1164 WARN(oos_page, "shadow PPGTT page already has an oos page\n");
1166 if (list_empty(>t->oos_page_free_list_head)) {
1167 oos_page = container_of(gtt->oos_page_use_list_head.next,
1168 struct intel_vgpu_oos_page, list);
1169 ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
1172 ret = detach_oos_page(vgpu, oos_page);
1176 oos_page = container_of(gtt->oos_page_free_list_head.next,
1177 struct intel_vgpu_oos_page, list);
1178 return attach_oos_page(vgpu, oos_page, gpt);
1181 static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
1182 struct intel_vgpu_guest_page *gpt)
1184 struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
 1186 if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
1189 trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
1190 gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
1192 list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
1193 return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
 1197 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
 1200 * This function is called before submitting a guest workload to the host,
 1201 * to sync all the out-of-sync shadow pages of the vGPU.
1204 * Zero on success, negative error code if failed.
1206 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1208 struct list_head *pos, *n;
1209 struct intel_vgpu_oos_page *oos_page;
1212 if (!enable_out_of_sync)
1215 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1216 oos_page = container_of(pos,
1217 struct intel_vgpu_oos_page, vm_list);
1218 ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
1226 * The heart of PPGTT shadow page table.
1228 static int ppgtt_handle_guest_write_page_table(
1229 struct intel_vgpu_guest_page *gpt,
1230 struct intel_gvt_gtt_entry *we, unsigned long index)
1232 struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
1233 struct intel_vgpu *vgpu = spt->vgpu;
1234 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1239 new_present = ops->test_present(we);
1241 ret = ppgtt_handle_guest_entry_removal(gpt, index);
1246 ret = ppgtt_handle_guest_entry_add(gpt, we, index);
1252 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
1253 vgpu->id, spt, we->val64, we->type);
1257 static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
1259 return enable_out_of_sync
1260 && gtt_type_is_pte_pt(
1261 guest_page_to_ppgtt_spt(gpt)->guest_page_type)
1262 && gpt->write_cnt >= 2;
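/*
 * Editorial summary of the out-of-sync heuristic above: a guest page table
 * page may stop being write-protected only when the feature is enabled, the
 * page is a leaf PTE page table, and the guest has written it at least twice
 * (write_cnt >= 2); non-leaf pages never qualify, so structural changes are
 * always shadowed immediately.
 */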
1265 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
1266 unsigned long index)
1268 set_bit(index, spt->post_shadow_bitmap);
1269 if (!list_empty(&spt->post_shadow_list))
1272 list_add_tail(&spt->post_shadow_list,
1273 &spt->vgpu->gtt.post_shadow_list_head);
1277 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
1280 * This function is called before submitting a guest workload to host,
1281 * to flush all the post shadows for a vGPU.
1284 * Zero on success, negative error code if failed.
1286 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1288 struct list_head *pos, *n;
1289 struct intel_vgpu_ppgtt_spt *spt;
1290 struct intel_gvt_gtt_entry ge;
1291 unsigned long index;
1294 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1295 spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
1298 for_each_set_bit(index, spt->post_shadow_bitmap,
1299 GTT_ENTRY_NUM_IN_ONE_PAGE) {
1300 ppgtt_get_guest_entry(spt, &ge, index);
1302 ret = ppgtt_handle_guest_write_page_table(
1303 &spt->guest_page, &ge, index);
1306 clear_bit(index, spt->post_shadow_bitmap);
1308 list_del_init(&spt->post_shadow_list);
1313 static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
1314 u64 pa, void *p_data, int bytes)
1316 struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
1317 struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
1318 struct intel_vgpu *vgpu = spt->vgpu;
1319 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1320 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1321 struct intel_gvt_gtt_entry we;
1322 unsigned long index;
1325 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1327 ppgtt_get_guest_entry(spt, &we, index);
1331 if (bytes == info->gtt_entry_size) {
1332 ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
1336 if (!test_bit(index, spt->post_shadow_bitmap)) {
1337 ret = ppgtt_handle_guest_entry_removal(gpt, index);
1342 ppgtt_set_post_shadow(spt, index);
1345 if (!enable_out_of_sync)
1351 ops->set_entry(gpt->oos_page->mem, &we, index,
1354 if (can_do_out_of_sync(gpt)) {
1356 ppgtt_allocate_oos_page(vgpu, gpt);
1358 ret = ppgtt_set_guest_page_oos(vgpu, gpt);
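/*
 * Hedged sketch of the write path above: a full-entry write
 * (bytes == gtt_entry_size) is shadowed immediately through
 * ppgtt_handle_guest_write_page_table(); a partial write only clears the old
 * shadow entry and marks the index in post_shadow_bitmap, so the entry is
 * (re)shadowed later by intel_vgpu_flush_post_shadow(). If the out-of-sync
 * heuristic fires, the page is attached to an oos page and unprotected
 * instead.
 */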
1366 * mm page table allocation policy for bdw+
1367 * - for ggtt, only virtual page table will be allocated.
1368 * - for ppgtt, dedicated virtual/shadow page table will be allocated.
1370 static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
1372 struct intel_vgpu *vgpu = mm->vgpu;
1373 struct intel_gvt *gvt = vgpu->gvt;
1374 const struct intel_gvt_device_info *info = &gvt->device_info;
1377 if (mm->type == INTEL_GVT_MM_PPGTT) {
1378 mm->page_table_entry_cnt = 4;
1379 mm->page_table_entry_size = mm->page_table_entry_cnt *
1380 info->gtt_entry_size;
1381 mem = kzalloc(mm->has_shadow_page_table ?
1382 mm->page_table_entry_size * 2
1383 : mm->page_table_entry_size,
1387 mm->virtual_page_table = mem;
1388 if (!mm->has_shadow_page_table)
1390 mm->shadow_page_table = mem + mm->page_table_entry_size;
1391 } else if (mm->type == INTEL_GVT_MM_GGTT) {
1392 mm->page_table_entry_cnt =
1393 (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
1394 mm->page_table_entry_size = mm->page_table_entry_cnt *
1395 info->gtt_entry_size;
1396 mem = vzalloc(mm->page_table_entry_size);
1399 mm->virtual_page_table = mem;
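/*
 * Illustrative sizing (editorial): on bdw+ a PPGTT mm keeps only its four
 * root pointers (4 * gtt_entry_size bytes, doubled when a shadow copy is
 * needed), while a GGTT mm allocates one virtual entry per GM page
 * (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT entries), which is why the former
 * uses kzalloc() and the latter vzalloc().
 */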
1404 static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
1406 if (mm->type == INTEL_GVT_MM_PPGTT) {
1407 kfree(mm->virtual_page_table);
1408 } else if (mm->type == INTEL_GVT_MM_GGTT) {
1409 if (mm->virtual_page_table)
1410 vfree(mm->virtual_page_table);
1412 mm->virtual_page_table = mm->shadow_page_table = NULL;
1415 static void invalidate_mm(struct intel_vgpu_mm *mm)
1417 struct intel_vgpu *vgpu = mm->vgpu;
1418 struct intel_gvt *gvt = vgpu->gvt;
1419 struct intel_gvt_gtt *gtt = &gvt->gtt;
1420 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1421 struct intel_gvt_gtt_entry se;
1424 if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
1427 for (i = 0; i < mm->page_table_entry_cnt; i++) {
1428 ppgtt_get_shadow_root_entry(mm, &se, i);
1429 if (!ops->test_present(&se))
1431 ppgtt_invalidate_shadow_page_by_shadow_entry(
1434 ppgtt_set_shadow_root_entry(mm, &se, i);
1436 trace_gpt_change(vgpu->id, "destroy root pointer",
1437 NULL, se.type, se.val64, i);
1439 mm->shadowed = false;
1443 * intel_vgpu_destroy_mm - destroy a mm object
1444 * @mm: a kref object
 1446 * This function is used to destroy a mm object for a vGPU.
1449 void intel_vgpu_destroy_mm(struct kref *mm_ref)
1451 struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
1452 struct intel_vgpu *vgpu = mm->vgpu;
1453 struct intel_gvt *gvt = vgpu->gvt;
1454 struct intel_gvt_gtt *gtt = &gvt->gtt;
1456 if (!mm->initialized)
1459 list_del(&mm->list);
1460 list_del(&mm->lru_list);
1462 if (mm->has_shadow_page_table)
1465 gtt->mm_free_page_table(mm);
1470 static int shadow_mm(struct intel_vgpu_mm *mm)
1472 struct intel_vgpu *vgpu = mm->vgpu;
1473 struct intel_gvt *gvt = vgpu->gvt;
1474 struct intel_gvt_gtt *gtt = &gvt->gtt;
1475 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1476 struct intel_vgpu_ppgtt_spt *spt;
1477 struct intel_gvt_gtt_entry ge, se;
1481 if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
1484 mm->shadowed = true;
1486 for (i = 0; i < mm->page_table_entry_cnt; i++) {
1487 ppgtt_get_guest_root_entry(mm, &ge, i);
1488 if (!ops->test_present(&ge))
1491 trace_gpt_change(vgpu->id, __func__, NULL,
1492 ge.type, ge.val64, i);
1494 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
1496 gvt_err("fail to populate guest root pointer\n");
1500 ppgtt_generate_shadow_entry(&se, spt, &ge);
1501 ppgtt_set_shadow_root_entry(mm, &se, i);
1503 trace_gpt_change(vgpu->id, "populate root pointer",
1504 NULL, se.type, se.val64, i);
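/*
 * Hedged summary: shadow_mm() walks the guest root entries, populates a
 * shadow page for each present one via
 * ppgtt_populate_shadow_page_by_guest_entry(), and installs the resulting
 * shadow root entries; invalidate_mm() above is its inverse, tearing the
 * shadow tree down when the mm is reclaimed or destroyed.
 */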
1513 * intel_vgpu_create_mm - create a mm object for a vGPU
1515 * @mm_type: mm object type, should be PPGTT or GGTT
1516 * @virtual_page_table: page table root pointers. Could be NULL if user wants
1517 * to populate shadow later.
1518 * @page_table_level: describe the page table level of the mm object
1519 * @pde_base_index: pde root pointer base in GGTT MMIO.
1521 * This function is used to create a mm object for a vGPU.
 1524 * The created mm object on success, or an ERR_PTR-encoded error code on failure.
1526 struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1527 int mm_type, void *virtual_page_table, int page_table_level,
1530 struct intel_gvt *gvt = vgpu->gvt;
1531 struct intel_gvt_gtt *gtt = &gvt->gtt;
1532 struct intel_vgpu_mm *mm;
1535 mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
1543 if (page_table_level == 1)
1544 mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
1545 else if (page_table_level == 3)
1546 mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1547 else if (page_table_level == 4)
1548 mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1555 mm->page_table_level = page_table_level;
1556 mm->pde_base_index = pde_base_index;
1559 mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
1561 kref_init(&mm->ref);
1562 atomic_set(&mm->pincount, 0);
1563 INIT_LIST_HEAD(&mm->list);
1564 INIT_LIST_HEAD(&mm->lru_list);
1565 list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
1567 ret = gtt->mm_alloc_page_table(mm);
1569 gvt_err("fail to allocate page table for mm\n");
1573 mm->initialized = true;
1575 if (virtual_page_table)
1576 memcpy(mm->virtual_page_table, virtual_page_table,
1577 mm->page_table_entry_size);
1579 if (mm->has_shadow_page_table) {
1580 ret = shadow_mm(mm);
1583 list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
1587 gvt_err("fail to create mm\n");
1589 intel_gvt_mm_unreference(mm);
1590 return ERR_PTR(ret);
1594 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
1595 * @mm: a vGPU mm object
 1597 * This function is called when a user no longer needs a vGPU mm object.
1599 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
1601 if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
1604 atomic_dec(&mm->pincount);
1608 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 1611 * This function is called when a user wants to use a vGPU mm object. If this
 1612 * mm object hasn't been shadowed yet, the shadow will be populated at this time.
1616 * Zero on success, negative error code if failed.
1618 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
1622 if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
1625 atomic_inc(&mm->pincount);
1627 if (!mm->shadowed) {
1628 ret = shadow_mm(mm);
1633 list_del_init(&mm->lru_list);
1634 list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
1638 static int reclaim_one_mm(struct intel_gvt *gvt)
1640 struct intel_vgpu_mm *mm;
1641 struct list_head *pos, *n;
1643 list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
1644 mm = container_of(pos, struct intel_vgpu_mm, lru_list);
1646 if (mm->type != INTEL_GVT_MM_PPGTT)
1648 if (atomic_read(&mm->pincount))
1651 list_del_init(&mm->lru_list);
1659 * GMA translation APIs.
1661 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
1662 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
1664 struct intel_vgpu *vgpu = mm->vgpu;
1665 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1666 struct intel_vgpu_ppgtt_spt *s;
1668 if (WARN_ON(!mm->has_shadow_page_table))
1671 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
1676 ppgtt_get_shadow_entry(s, e, index);
1678 ppgtt_get_guest_entry(s, e, index);
1683 * intel_vgpu_gma_to_gpa - translate a gma to GPA
1684 * @mm: mm object. could be a PPGTT or GGTT mm object
1685 * @gma: graphics memory address in this mm object
 1687 * This function is used to translate a graphics memory address in a specific
 1688 * graphics memory space to a guest physical address.
1691 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
1693 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
1695 struct intel_vgpu *vgpu = mm->vgpu;
1696 struct intel_gvt *gvt = vgpu->gvt;
1697 struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
1698 struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
1699 unsigned long gpa = INTEL_GVT_INVALID_ADDR;
1700 unsigned long gma_index[4];
1701 struct intel_gvt_gtt_entry e;
1705 if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
1706 return INTEL_GVT_INVALID_ADDR;
1708 if (mm->type == INTEL_GVT_MM_GGTT) {
1709 if (!vgpu_gmadr_is_valid(vgpu, gma))
1712 ggtt_get_guest_entry(mm, &e,
1713 gma_ops->gma_to_ggtt_pte_index(gma));
1714 gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
1715 + (gma & ~GTT_PAGE_MASK);
1717 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
1721 switch (mm->page_table_level) {
1723 ppgtt_get_shadow_root_entry(mm, &e, 0);
1724 gma_index[0] = gma_ops->gma_to_pml4_index(gma);
1725 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
1726 gma_index[2] = gma_ops->gma_to_pde_index(gma);
1727 gma_index[3] = gma_ops->gma_to_pte_index(gma);
1731 ppgtt_get_shadow_root_entry(mm, &e,
1732 gma_ops->gma_to_l3_pdp_index(gma));
1733 gma_index[0] = gma_ops->gma_to_pde_index(gma);
1734 gma_index[1] = gma_ops->gma_to_pte_index(gma);
1738 ppgtt_get_shadow_root_entry(mm, &e,
1739 gma_ops->gma_to_pde_index(gma));
1740 gma_index[0] = gma_ops->gma_to_pte_index(gma);
1748 /* walk into the shadow page table and get gpa from guest entry */
1749 for (i = 0; i < index; i++) {
1750 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
1756 gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
1757 + (gma & ~GTT_PAGE_MASK);
1759 trace_gma_translate(vgpu->id, "ppgtt", 0,
1760 mm->page_table_level, gma, gpa);
1763 gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
1764 return INTEL_GVT_INVALID_ADDR;
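/*
 * Hedged example of the PPGTT path above: for a 4-level table the shadow
 * root entry is loaded first, then the walk descends pml4 -> pdp -> pde ->
 * pte using the gma_index[] values, and the resulting gpa is
 * (pfn << GTT_PAGE_SHIFT) + (gma & ~GTT_PAGE_MASK), i.e. the page returned
 * by the last level plus the in-page offset.
 */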
1767 static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
1768 unsigned int off, void *p_data, unsigned int bytes)
1770 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
1771 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1772 unsigned long index = off >> info->gtt_entry_size_shift;
1773 struct intel_gvt_gtt_entry e;
1775 if (bytes != 4 && bytes != 8)
1778 ggtt_get_guest_entry(ggtt_mm, &e, index);
1779 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
1785 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
1787 * @off: register offset
1788 * @p_data: data will be returned to guest
1789 * @bytes: data length
1791 * This function is used to emulate the GTT MMIO register read
1794 * Zero on success, error code if failed.
1796 int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
1797 void *p_data, unsigned int bytes)
1799 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1802 if (bytes != 4 && bytes != 8)
1805 off -= info->gtt_start_offset;
1806 ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
1810 static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1811 void *p_data, unsigned int bytes)
1813 struct intel_gvt *gvt = vgpu->gvt;
1814 const struct intel_gvt_device_info *info = &gvt->device_info;
1815 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
1816 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1817 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
1819 struct intel_gvt_gtt_entry e, m;
1822 if (bytes != 4 && bytes != 8)
1825 gma = g_gtt_index << GTT_PAGE_SHIFT;
1827 /* the VM may configure the whole GM space when ballooning is used */
1828 if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
1829 "vgpu%d: found oob ggtt write, offset %x\n",
1834 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
1836 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
1839 if (ops->test_present(&e)) {
1840 ret = gtt_entry_p2m(vgpu, &e, &m);
1842 gvt_err("vgpu%d: fail to translate guest gtt entry\n",
1851 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
1852 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
1857 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
1859 * @off: register offset
1860 * @p_data: data from guest write
1861 * @bytes: data length
1863 * This function is used to emulate the GTT MMIO register write
1866 * Zero on success, error code if failed.
1868 int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1869 void *p_data, unsigned int bytes)
1871 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1874 if (bytes != 4 && bytes != 8)
1877 off -= info->gtt_start_offset;
1878 ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
1882 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1883 intel_gvt_gtt_type_t type)
1885 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
1886 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1887 int page_entry_num = GTT_PAGE_SIZE >>
1888 vgpu->gvt->device_info.gtt_entry_size_shift;
1889 struct page *scratch_pt;
1894 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
1897 scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
1899 gvt_err("fail to allocate scratch page\n");
1903 p = kmap_atomic(scratch_pt);
1904 mfn = intel_gvt_hypervisor_virt_to_mfn(p);
1905 if (mfn == INTEL_GVT_INVALID_ADDR) {
1906 gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
1908 __free_page(scratch_pt);
1911 gtt->scratch_pt[type].page_mfn = mfn;
1912 gtt->scratch_pt[type].page = scratch_pt;
1913 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
1914 vgpu->id, type, mfn);
 1916 /* Build the tree by filling the scratch pt with entries which point to
 1917 * the next level scratch pt or scratch page. scratch_pt[type] indicates
 1918 * the scratch pt/scratch page used by page tables of that type.
 1920 * E.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
 1921 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself is
 1922 * of type GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch page mfn.
1924 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
1925 struct intel_gvt_gtt_entry se;
1927 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
1928 se.type = get_entry_type(type - 1);
1929 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
 1931 /* The entry parameters like present/writeable/cache type are
 1932 * set to the same values as in i915's scratch page tree.
1934 se.val64 |= _PAGE_PRESENT | _PAGE_RW;
1935 if (type == GTT_TYPE_PPGTT_PDE_PT)
1936 se.val64 |= PPAT_CACHED_INDEX;
1938 for (i = 0; i < page_entry_num; i++)
1939 ops->set_entry(p, &se, i, false, 0, vgpu);
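/*
 * Editorial sketch of the resulting scratch tree:
 * scratch_pt[GTT_TYPE_PPGTT_PTE_PT] is the lowest-level scratch page table,
 * scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is filled with entries whose pfn is the
 * PTE-level scratch mfn, and so on up the levels. When a guest entry is
 * removed, the shadow entry can then be pointed at the scratch pt of the
 * matching level (see ppgtt_handle_guest_entry_removal()) instead of being
 * left dangling.
 */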
1947 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
1951 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
1952 if (vgpu->gtt.scratch_pt[i].page != NULL) {
1953 __free_page(vgpu->gtt.scratch_pt[i].page);
1954 vgpu->gtt.scratch_pt[i].page = NULL;
1955 vgpu->gtt.scratch_pt[i].page_mfn = 0;
1962 static int create_scratch_page_tree(struct intel_vgpu *vgpu)
1966 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
1967 ret = alloc_scratch_pages(vgpu, i);
1975 release_scratch_page_tree(vgpu);
 1980 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
1983 * This function is used to initialize per-vGPU graphics memory virtualization
1987 * Zero on success, error code if failed.
1989 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
1991 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
1992 struct intel_vgpu_mm *ggtt_mm;
1994 hash_init(gtt->guest_page_hash_table);
1995 hash_init(gtt->shadow_page_hash_table);
1997 INIT_LIST_HEAD(>t->mm_list_head);
1998 INIT_LIST_HEAD(>t->oos_page_list_head);
1999 INIT_LIST_HEAD(>t->post_shadow_list_head);
2001 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
2003 if (IS_ERR(ggtt_mm)) {
2004 gvt_err("fail to create mm for ggtt.\n");
2005 return PTR_ERR(ggtt_mm);
2008 gtt->ggtt_mm = ggtt_mm;
2010 return create_scratch_page_tree(vgpu);
 2014 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2017 * This function is used to clean up per-vGPU graphics memory virtualization
 2021 * No return value.
2023 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2025 struct list_head *pos, *n;
2026 struct intel_vgpu_mm *mm;
2028 ppgtt_free_all_shadow_page(vgpu);
2029 release_scratch_page_tree(vgpu);
2031 list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
2032 mm = container_of(pos, struct intel_vgpu_mm, list);
2033 vgpu->gvt->gtt.mm_free_page_table(mm);
2034 list_del(&mm->list);
2035 list_del(&mm->lru_list);
2040 static void clean_spt_oos(struct intel_gvt *gvt)
2042 struct intel_gvt_gtt *gtt = &gvt->gtt;
2043 struct list_head *pos, *n;
2044 struct intel_vgpu_oos_page *oos_page;
2046 WARN(!list_empty(>t->oos_page_use_list_head),
2047 "someone is still using oos page\n");
2049 list_for_each_safe(pos, n, >t->oos_page_free_list_head) {
2050 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2051 list_del(&oos_page->list);
2056 static int setup_spt_oos(struct intel_gvt *gvt)
2058 struct intel_gvt_gtt *gtt = &gvt->gtt;
2059 struct intel_vgpu_oos_page *oos_page;
2063 INIT_LIST_HEAD(>t->oos_page_free_list_head);
2064 INIT_LIST_HEAD(>t->oos_page_use_list_head);
2066 for (i = 0; i < preallocated_oos_pages; i++) {
2067 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2069 gvt_err("fail to pre-allocate oos page\n");
2074 INIT_LIST_HEAD(&oos_page->list);
2075 INIT_LIST_HEAD(&oos_page->vm_list);
2077 list_add_tail(&oos_page->list, >t->oos_page_free_list_head);
2080 gvt_dbg_mm("%d oos pages preallocated\n", i);
2089 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2091 * @page_table_level: PPGTT page table level
2092 * @root_entry: PPGTT page table root pointers
2094 * This function is used to find a PPGTT mm object from mm object pool
2097 * pointer to mm object on success, NULL if failed.
2099 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2100 int page_table_level, void *root_entry)
2102 struct list_head *pos;
2103 struct intel_vgpu_mm *mm;
2106 list_for_each(pos, &vgpu->gtt.mm_list_head) {
2107 mm = container_of(pos, struct intel_vgpu_mm, list);
2108 if (mm->type != INTEL_GVT_MM_PPGTT)
2111 if (mm->page_table_level != page_table_level)
2115 dst = mm->virtual_page_table;
2117 if (page_table_level == 3) {
2118 if (src[0] == dst[0]
2121 && src[3] == dst[3])
2124 if (src[0] == dst[0])
2132 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
2135 * @page_table_level: PPGTT page table level
2137 * This function is used to create a PPGTT mm object from a guest to GVT-g
2141 * Zero on success, negative error code if failed.
2143 int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
2144 int page_table_level)
2146 u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
2147 struct intel_vgpu_mm *mm;
2149 if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
2152 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
2154 intel_gvt_mm_reference(mm);
2156 mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
2157 pdp, page_table_level, 0);
2159 gvt_err("fail to create mm\n");
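/*
 * Hedged note: the g2v create notification reuses the PDP root pointers the
 * guest wrote into vgtif_reg(pdp[0]) and the following registers;
 * intel_vgpu_find_ppgtt_mm() matches an existing mm by comparing those
 * values against mm->virtual_page_table, so a repeated create request only
 * takes another reference instead of building a second shadow tree.
 */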
2167 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
2170 * @page_table_level: PPGTT page table level
 2172 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
2176 * Zero on success, negative error code if failed.
2178 int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
2179 int page_table_level)
2181 u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
2182 struct intel_vgpu_mm *mm;
2184 if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
2187 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
2189 gvt_err("fail to find ppgtt instance.\n");
2192 intel_gvt_mm_unreference(mm);
2197 * intel_gvt_init_gtt - initialize mm components of a GVT device
2200 * This function is called at the initialization stage, to initialize
2201 * the mm components of a GVT device.
2204 * zero on success, negative error code if failed.
2206 int intel_gvt_init_gtt(struct intel_gvt *gvt)
2210 gvt_dbg_core("init gtt\n");
2212 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
2213 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2214 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2215 gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
2216 gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
2221 if (enable_out_of_sync) {
2222 ret = setup_spt_oos(gvt);
2224 gvt_err("fail to initialize SPT oos\n");
2228 INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
2233 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2236 * This function is called at the driver unloading stage, to clean up the
 2237 * mm components of a GVT device.
2240 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2242 if (enable_out_of_sync)