| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
| X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
| X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
- | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
+ | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
const u32 kvm_supported_word9_x86_features =
F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
- F(ADX);
+ F(ADX) | F(SMAP);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
return best && (best->ebx & bit(X86_FEATURE_SMEP));
}
+static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_SMAP));
+}
+
static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
}
}
-static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+void update_permission_bitmask(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu, bool ept)
{
unsigned bit, byte, pfec;
u8 map;
- bool fault, x, w, u, wf, uf, ff, smep;
+ bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, cr4_smep, smap = 0;
- smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+ cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+ cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
pfec = byte << 1;
map = 0;
wf = pfec & PFERR_WRITE_MASK;
uf = pfec & PFERR_USER_MASK;
ff = pfec & PFERR_FETCH_MASK;
+ /*
+ * PFERR_RSVD_MASK bit is set in PFEC if the access is not
+ * subject to SMAP restrictions, and cleared otherwise. The
+ * bit is only meaningful if the SMAP bit is set in CR4.
+ */
+ smapf = !(pfec & PFERR_RSVD_MASK);
for (bit = 0; bit < 8; ++bit) {
x = bit & ACC_EXEC_MASK;
w = bit & ACC_WRITE_MASK;
/* Allow supervisor writes if !cr0.wp */
w |= !is_write_protection(vcpu) && !uf;
/* Disallow supervisor fetches of user code if cr4.smep */
- x &= !(smep && u && !uf);
+ x &= !(cr4_smep && u && !uf);
+
+ /*
+ * SMAP: kernel-mode data accesses from user-mode
+ * mappings should fault. A fault is considered
+ * a SMAP violation if all of the following
+ * conditions are true:
+ * - X86_CR4_SMAP is set in CR4
+ * - A user page is accessed
+ * - Page fault in kernel mode
+ * - CPL = 3 or X86_EFLAGS_AC is clear
+ *
+ * Here, we cover the first three conditions.
+ * The fourth is computed dynamically in
+ * permission_fault() and is in smapf.
+ *
+ * Also, SMAP does not affect instruction
+ * fetches; add the !ff check here to make it
+ * clearer.
+ */
+ smap = cr4_smap && u && !uf && !ff;
} else
/* Not really needed: no U/S accesses on ept */
u = 1;
- fault = (ff && !x) || (uf && !u) || (wf && !w);
+ fault = (ff && !x) || (uf && !u) || (wf && !w) ||
+ (smapf && smap);
map |= fault << bit;
}
mmu->permissions[byte] = map;
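
To make the precomputation above easier to follow, here is a minimal userspace model of the table build and of the one-byte lookup it enables. The ACC_* and PFERR_* values mirror the kernel's, but the sketch deliberately leaves out EPT, SMEP and SMAP, so it illustrates the indexing scheme rather than the full routine:

#include <stdbool.h>
#include <stdint.h>

#define ACC_EXEC_MASK  1u
#define ACC_WRITE_MASK 2u
#define ACC_USER_MASK  4u

#define PFERR_WRITE_MASK (1u << 1)
#define PFERR_USER_MASK  (1u << 2)
#define PFERR_FETCH_MASK (1u << 4)

static uint8_t permissions[16];		/* one byte per pfec >> 1 */

static void update_permission_bitmap(bool cr0_wp)
{
	for (unsigned byte = 0; byte < 16; ++byte) {
		unsigned pfec = byte << 1;
		bool wf = pfec & PFERR_WRITE_MASK;
		bool uf = pfec & PFERR_USER_MASK;
		bool ff = pfec & PFERR_FETCH_MASK;
		uint8_t map = 0;

		for (unsigned bit = 0; bit < 8; ++bit) {
			bool x = bit & ACC_EXEC_MASK;
			bool w = bit & ACC_WRITE_MASK;
			bool u = bit & ACC_USER_MASK;

			/* supervisor writes ignore pte.W if CR0.WP = 0 */
			w |= !cr0_wp && !uf;

			bool fault = (ff && !x) || (uf && !u) || (wf && !w);
			map |= (uint8_t)(fault << bit);
		}
		permissions[byte] = map;
	}
}

/* The per-fault permission check then collapses to one table lookup. */
static bool permission_fault(unsigned pte_access, unsigned pfec)
{
	return (permissions[pfec >> 1] >> pte_access) & 1;
}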
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
-#define PFERR_PRESENT_MASK (1U << 0)
-#define PFERR_WRITE_MASK (1U << 1)
-#define PFERR_USER_MASK (1U << 2)
-#define PFERR_RSVD_MASK (1U << 3)
-#define PFERR_FETCH_MASK (1U << 4)
+#define PFERR_PRESENT_BIT 0
+#define PFERR_WRITE_BIT 1
+#define PFERR_USER_BIT 2
+#define PFERR_RSVD_BIT 3
+#define PFERR_FETCH_BIT 4
+
+#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
+#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
+#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
+#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
+#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
bool execonly);
+void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ bool ept);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
* Will a fault with a given page-fault error code (pfec) cause a permission
* fault with the given access (in ACC_* format)?
*/
-static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
- unsigned pfec)
+static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ unsigned pte_access, unsigned pfec)
{
- return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
+ int cpl = kvm_x86_ops->get_cpl(vcpu);
+ unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+
+ /*
+ * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
+ *
+ * If CPL = 3, SMAP applies to all supervisor-mode data accesses
+ * (these are implicit supervisor accesses) regardless of the value
+ * of EFLAGS.AC.
+ *
+ * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
+ * the result in X86_EFLAGS_AC. We then insert it in place of
+ * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
+ * but it will be one in index if SMAP checks are being overridden.
+ * It is important to keep this branchless.
+ */
+ unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
+ int index = (pfec >> 1) +
+ (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
+
+ return (mmu->permissions[index] >> pte_access) & 1;
}
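
The branchless computation deserves a worked check. The following standalone sketch (hand-copied constants, illustrative main()) shows why it works: for CPL 0..2, cpl - 3 is negative, so bit 18 of its two's-complement value is set and the AND preserves EFLAGS.AC; for CPL 3 the difference is zero and the override disappears. The surviving AC bit is then shifted from bit 18 down to bit 2, exactly where pfec's PFERR_RSVD bit lands after pfec >> 1:

#include <assert.h>

#define X86_EFLAGS_AC_BIT 18
#define X86_EFLAGS_AC     (1UL << X86_EFLAGS_AC_BIT)
#define PFERR_RSVD_BIT    3

static int smap_index(int cpl, unsigned long rflags, unsigned pfec)
{
	/* negative for CPL 0..2 (bit 18 set), zero for CPL 3 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);

	/* move the surviving AC bit (bit 18) down to bit 2, the slot
	 * PFERR_RSVD (bit 3) occupies once pfec has been shifted */
	return (int)((pfec >> 1) +
		     (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1)));
}

int main(void)
{
	/* CPL 0 with EFLAGS.AC set: SMAP checks overridden, bit 2 set */
	assert(smap_index(0, X86_EFLAGS_AC, 0) == 4);
	/* CPL 3: EFLAGS.AC is ignored, no override */
	assert(smap_index(3, X86_EFLAGS_AC, 0) == 0);
	/* CPL 0 with EFLAGS.AC clear: SMAP checks stay armed */
	assert(smap_index(0, 0, 0) == 0);
	return 0;
}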
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
walker->ptes[walker->level - 1] = pte;
} while (!is_last_gpte(mmu, walker->level, pte));
- if (unlikely(permission_fault(mmu, pte_access, access))) {
+ if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
errcode |= PFERR_PRESENT_MASK;
goto error;
}
hw_cr4 &= ~X86_CR4_PAE;
hw_cr4 |= X86_CR4_PSE;
/*
- * SMEP is disabled if CPU is in non-paging mode in
- * hardware. However KVM always uses paging mode to
+ * SMEP/SMAP is disabled if CPU is in non-paging mode
+ * in hardware. However KVM always uses paging mode to
* emulate guest non-paging mode with TDP.
- * To emulate this behavior, SMEP needs to be manually
- * disabled when guest switches to non-paging mode.
+ * To emulate this behavior, SMEP/SMAP needs to be
+ * manually disabled when guest switches to non-paging
+ * mode.
*/
- hw_cr4 &= ~X86_CR4_SMEP;
+ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
} else if (!(cr4 & X86_CR4_PAE)) {
hw_cr4 &= ~X86_CR4_PAE;
}
if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
return 1;
+ if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
+ return 1;
+
if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
return 1;
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
+ if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
+ update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
+
if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
kvm_update_cpuid(vcpu);
{
struct timespec ts;
- WARN_ON(preemptible());
ktime_get_ts(&ts);
monotonic_to_bootbased(&ts);
return timespec_to_ns(&ts);
| (write ? PFERR_WRITE_MASK : 0);
if (vcpu_match_mmio_gva(vcpu, gva)
- && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
+ && !permission_fault(vcpu, vcpu->arch.walk_mmu,
+ vcpu->arch.access, access)) {
*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
(gva & (PAGE_SIZE - 1));
trace_vcpu_match_mmio(gva, *gpa, write, false);
}
bcm2835_rng_ops.priv = (unsigned long)rng_base;
+ /* set warm-up count & enable */
+ __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
+ __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
+
/* register driver */
err = hwrng_register(&bcm2835_rng_ops);
if (err) {
dev_err(dev, "hwrng registration failed\n");
iounmap(rng_base);
- } else {
+ } else
dev_info(dev, "hwrng registered\n");
- /* set warm-up count & enable */
- __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
- __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
- }
return err;
}
return 0;
}
- WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
- node->start, node->size);
return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
struct {
u16 pwm_freq_hz;
+ bool present;
bool active_low_pwm;
} backlight;
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
- struct drm_file *file, struct i915_hw_context *to);
+ struct i915_hw_context *to);
struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
- if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
- kref_get(&ctx->ref);
+ kref_get(&ctx->ref);
}
static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
- if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
- kref_put(&ctx->ref, i915_gem_context_free);
+ kref_put(&ctx->ref, i915_gem_context_free);
}
static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
- ret = i915_switch_context(ring, NULL, ring->default_context);
+ ret = i915_switch_context(ring, ring->default_context);
if (ret)
return ret;
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
-static int do_switch(struct intel_ring_buffer *ring,
- struct i915_hw_context *to);
-
static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
typeof(*ctx), ref);
struct i915_hw_ppgtt *ppgtt = NULL;
- /* We refcount even the aliasing PPGTT to keep the code symmetric */
- if (USES_PPGTT(ctx->obj->base.dev))
- ppgtt = ctx_to_ppgtt(ctx);
+ if (ctx->obj) {
+ /* We refcount even the aliasing PPGTT to keep the code symmetric */
+ if (USES_PPGTT(ctx->obj->base.dev))
+ ppgtt = ctx_to_ppgtt(ctx);
- /* XXX: Free up the object before tearing down the address space, in
- * case we're bound in the PPGTT */
- drm_gem_object_unreference(&ctx->obj->base);
+ /* XXX: Free up the object before tearing down the address space, in
+ * case we're bound in the PPGTT */
+ drm_gem_object_unreference(&ctx->obj->base);
+ }
if (ppgtt)
kref_put(&ppgtt->ref, ppgtt_release);
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
- ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
- INIT_LIST_HEAD(&ctx->link);
- if (ctx->obj == NULL) {
- kfree(ctx);
- DRM_DEBUG_DRIVER("Context object allocated failed\n");
- return ERR_PTR(-ENOMEM);
- }
+ list_add_tail(&ctx->link, &dev_priv->context_list);
- if (INTEL_INFO(dev)->gen >= 7) {
- ret = i915_gem_object_set_cache_level(ctx->obj,
- I915_CACHE_L3_LLC);
- /* Failure shouldn't ever happen this early */
- if (WARN_ON(ret))
+ if (dev_priv->hw_context_size) {
+ ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+ if (ctx->obj == NULL) {
+ ret = -ENOMEM;
goto err_out;
- }
+ }
- list_add_tail(&ctx->link, &dev_priv->context_list);
+ if (INTEL_INFO(dev)->gen >= 7) {
+ ret = i915_gem_object_set_cache_level(ctx->obj,
+ I915_CACHE_L3_LLC);
+ /* Failure shouldn't ever happen this early */
+ if (WARN_ON(ret))
+ goto err_out;
+ }
+ }
/* Default context will never have a file_priv */
- if (file_priv == NULL)
- return ctx;
-
- ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
- GFP_KERNEL);
- if (ret < 0)
- goto err_out;
+ if (file_priv != NULL) {
+ ret = idr_alloc(&file_priv->context_idr, ctx,
+ DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto err_out;
+ } else
+ ret = DEFAULT_CONTEXT_ID;
ctx->file_priv = file_priv;
ctx->id = ret;
if (IS_ERR(ctx))
return ctx;
- if (is_global_default_ctx) {
+ if (is_global_default_ctx && ctx->obj) {
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
return ctx;
err_unpin:
- if (is_global_default_ctx)
+ if (is_global_default_ctx && ctx->obj)
i915_gem_object_ggtt_unpin(ctx->obj);
err_destroy:
i915_gem_context_unreference(ctx);
void i915_gem_context_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
int i;
- if (!HAS_HW_CONTEXTS(dev))
- return;
-
/* Prevent the hardware from restoring the last context (which hung) on
* the next switch */
for (i = 0; i < I915_NUM_RINGS; i++) {
- struct i915_hw_context *dctx;
- if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
- continue;
+ struct intel_ring_buffer *ring = &dev_priv->ring[i];
+ struct i915_hw_context *dctx = ring->default_context;
/* Do a fake switch to the default context */
- ring = &dev_priv->ring[i];
- dctx = ring->default_context;
- if (WARN_ON(!dctx))
+ if (ring->last_context == dctx)
continue;
if (!ring->last_context)
continue;
- if (ring->last_context == dctx)
- continue;
-
- if (i == RCS) {
+ if (dctx->obj && i == RCS) {
WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
get_context_alignment(dev), 0));
/* Fake a finish/inactive */
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct i915_hw_context *ctx;
int i;
- if (!HAS_HW_CONTEXTS(dev))
- return 0;
-
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
if (WARN_ON(dev_priv->ring[RCS].default_context))
return 0;
- dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
-
- if (dev_priv->hw_context_size > (1<<20)) {
- DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
- return -E2BIG;
+ if (HAS_HW_CONTEXTS(dev)) {
+ dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+ if (dev_priv->hw_context_size > (1<<20)) {
+ DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
+ dev_priv->hw_context_size);
+ dev_priv->hw_context_size = 0;
+ }
}
- dev_priv->ring[RCS].default_context =
- i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
-
- if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
- DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
- PTR_ERR(dev_priv->ring[RCS].default_context));
- return PTR_ERR(dev_priv->ring[RCS].default_context);
+ ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+ if (IS_ERR(ctx)) {
+ DRM_ERROR("Failed to create default global context (error %ld)\n",
+ PTR_ERR(ctx));
+ return PTR_ERR(ctx);
}
- for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
- if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
- continue;
-
- ring = &dev_priv->ring[i];
+ /* NB: RCS will hold a ref for all rings */
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ dev_priv->ring[i].default_context = ctx;
- /* NB: RCS will hold a ref for all rings */
- ring->default_context = dev_priv->ring[RCS].default_context;
- }
-
- DRM_DEBUG_DRIVER("HW context support initialized\n");
+ DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
return 0;
}
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
int i;
- if (!HAS_HW_CONTEXTS(dev))
- return;
-
- /* The only known way to stop the gpu from accessing the hw context is
- * to reset it. Do this as the very last operation to avoid confusing
- * other code, leading to spurious errors. */
- intel_gpu_reset(dev);
-
- /* When default context is created and switched to, base object refcount
- * will be 2 (+1 from object creation and +1 from do_switch()).
- * i915_gem_context_fini() will be called after gpu_idle() has switched
- * to default context. So we need to unreference the base object once
- * to offset the do_switch part, so that i915_gem_context_unreference()
- * can then free the base object correctly. */
- WARN_ON(!dev_priv->ring[RCS].last_context);
- if (dev_priv->ring[RCS].last_context == dctx) {
- /* Fake switch to NULL context */
- WARN_ON(dctx->obj->active);
- i915_gem_object_ggtt_unpin(dctx->obj);
- i915_gem_context_unreference(dctx);
- dev_priv->ring[RCS].last_context = NULL;
+ if (dctx->obj) {
+ /* The only known way to stop the gpu from accessing the hw context is
+ * to reset it. Do this as the very last operation to avoid confusing
+ * other code, leading to spurious errors. */
+ intel_gpu_reset(dev);
+
+ /* When default context is created and switched to, base object refcount
+ * will be 2 (+1 from object creation and +1 from do_switch()).
+ * i915_gem_context_fini() will be called after gpu_idle() has switched
+ * to default context. So we need to unreference the base object once
+ * to offset the do_switch part, so that i915_gem_context_unreference()
+ * can then free the base object correctly. */
+ WARN_ON(!dev_priv->ring[RCS].last_context);
+ if (dev_priv->ring[RCS].last_context == dctx) {
+ /* Fake switch to NULL context */
+ WARN_ON(dctx->obj->active);
+ i915_gem_object_ggtt_unpin(dctx->obj);
+ i915_gem_context_unreference(dctx);
+ dev_priv->ring[RCS].last_context = NULL;
+ }
}
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_ring_buffer *ring = &dev_priv->ring[i];
- if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
- continue;
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
i915_gem_object_ggtt_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
- dev_priv->mm.aliasing_ppgtt = NULL;
}
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
struct intel_ring_buffer *ring;
int ret, i;
- if (!HAS_HW_CONTEXTS(dev_priv->dev))
- return 0;
-
/* This is the only place the aliasing PPGTT gets enabled, which means
* it has to happen before we bail on reset */
if (dev_priv->mm.aliasing_ppgtt) {
BUG_ON(!dev_priv->ring[RCS].default_context);
for_each_ring(ring, dev_priv, i) {
- ret = do_switch(ring, ring->default_context);
+ ret = i915_switch_context(ring, ring->default_context);
if (ret)
return ret;
}
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_HW_CONTEXTS(dev)) {
- /* Cheat for hang stats */
- file_priv->private_default_ctx =
- kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
-
- if (file_priv->private_default_ctx == NULL)
- return -ENOMEM;
-
- file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
- return 0;
- }
idr_init(&file_priv->context_idr);
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- if (!HAS_HW_CONTEXTS(dev)) {
- kfree(file_priv->private_default_ctx);
- return;
- }
-
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
- i915_gem_context_unreference(file_priv->private_default_ctx);
idr_destroy(&file_priv->context_idr);
+
+ i915_gem_context_unreference(file_priv->private_default_ctx);
}
struct i915_hw_context *
{
struct i915_hw_context *ctx;
- if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
- return file_priv->private_default_ctx;
-
ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
if (!ctx)
return ERR_PTR(-ENOENT);
/**
* i915_switch_context() - perform a GPU context switch.
* @ring: ring for which we'll execute the context switch
- * @file_priv: file_priv associated with the context, may be NULL
* @to: the context to switch to
*
* The context life cycle is simple. The context refcount is incremented and
* object while letting the normal object tracking destroy the backing BO.
*/
int i915_switch_context(struct intel_ring_buffer *ring,
- struct drm_file *file,
struct i915_hw_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- BUG_ON(file && to == NULL);
-
- /* We have the fake context */
- if (!HAS_HW_CONTEXTS(ring->dev)) {
- ring->last_context = to;
+ if (to->obj == NULL) { /* We have the fake context */
+ if (to != ring->last_context) {
+ i915_gem_context_reference(to);
+ if (ring->last_context)
+ i915_gem_context_unreference(ring->last_context);
+ ring->last_context = to;
+ }
return 0;
}
return do_switch(ring, to);
}
+static bool hw_context_enabled(struct drm_device *dev)
+{
+ return to_i915(dev)->hw_context_size;
+}
+
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct i915_hw_context *ctx;
int ret;
- if (!HAS_HW_CONTEXTS(dev))
+ if (!hw_context_enabled(dev))
return -ENODEV;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto err;
- ret = i915_switch_context(ring, file, ctx);
+ ret = i915_switch_context(ring, ctx);
if (ret)
goto err;
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
+ /* Err on the side of enabling backlight if no backlight block. */
+ dev_priv->vbt.backlight.present = true;
+
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
return;
entry = &backlight_data->data[panel_type];
+ dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
+ if (!dev_priv->vbt.backlight.present) {
+ DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
+ entry->type);
+ return;
+ }
+
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
struct bdb_lvds_lfp_data_entry data[16];
} __packed;
+#define BDB_BACKLIGHT_TYPE_NONE 0
+#define BDB_BACKLIGHT_TYPE_PWM 2
+
struct bdb_lfp_backlight_data_entry {
u8 type:2;
u8 active_low_pwm:1;
unsigned long flags;
int ret;
+ if (!dev_priv->vbt.backlight.present) {
+ DRM_DEBUG_KMS("native backlight control not available per VBT\n");
+ return 0;
+ }
+
/* set level and max in panel struct */
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
ret = dev_priv->display.setup_backlight(intel_connector);
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+ if (IS_I915GM(dev) && enabled) {
+ struct intel_framebuffer *fb;
+
+ fb = to_intel_framebuffer(enabled->primary->fb);
+
+ /* self-refresh seems busted with untiled */
+ if (fb->obj->tiling_mode == I915_TILING_NONE)
+ enabled = NULL;
+ }
+
/*
* Overlay gets an aggressive default since video jitter is bad.
*/
/* Find the matching extent */
extents = map->nr_extents;
- smp_read_barrier_depends();
+ smp_rmb();
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].first;
last = first + map->extent[idx].count - 1;
/* Find the matching extent */
extents = map->nr_extents;
- smp_read_barrier_depends();
+ smp_rmb();
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].first;
last = first + map->extent[idx].count - 1;
/* Find the matching extent */
extents = map->nr_extents;
- smp_read_barrier_depends();
+ smp_rmb();
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].lower_first;
last = first + map->extent[idx].count - 1;
* were written before the count of the extents.
*
* To achieve this, smp_wmb() is used to guarantee the write
- * order and smp_read_barrier_depends() is guaranteed that we
- * don't have crazy architectures returning stale data.
- *
+ * order and smp_rmb() guarantees that we don't have crazy
+ * architectures returning stale data.
*/
mutex_lock(&id_map_mutex);
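
The pairing the comment describes is the standard publish/consume pattern: the writer fills a new extent, issues smp_wmb(), then stores the updated count; the reader loads the count, issues smp_rmb(), then scans at most that many entries. Here is a userspace analogue sketched with C11 release/acquire in place of the kernel barrier pair; the extent layout is illustrative, not the kernel's struct uid_gid_map:

#include <stdatomic.h>

struct extent { unsigned first, count; };

static struct extent extents[5];
static atomic_uint nr_extents;

/* Writer: fill the new slot completely, then publish the count. The
 * release store plays the role of smp_wmb() before the count update. */
static void publish_extent(unsigned first, unsigned count)
{
	unsigned n = atomic_load_explicit(&nr_extents, memory_order_relaxed);

	extents[n].first = first;
	extents[n].count = count;
	atomic_store_explicit(&nr_extents, n + 1, memory_order_release);
}

/* Reader: snapshot the count, then scan at most that many entries.
 * The acquire load plays the role of the nr_extents read followed by
 * smp_rmb() above. */
static int lookup_extent(unsigned id)
{
	unsigned n = atomic_load_explicit(&nr_extents, memory_order_acquire);

	for (unsigned idx = 0; idx < n; idx++) {
		/* unsigned wraparound turns this into one range check */
		if (id - extents[idx].first < extents[idx].count)
			return (int)idx;
	}
	return -1;
}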
bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
}
+static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
+
+static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
+{
+ if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
+ kvm_rtc_eoi_tracking_restore_all(ioapic);
+}
+
static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
bool new_val, old_val;
} else {
__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
ioapic->rtc_status.pending_eoi--;
+ rtc_status_pending_eoi_check_valid(ioapic);
}
-
- WARN_ON(ioapic->rtc_status.pending_eoi < 0);
}
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
{
- if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map))
+ if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) {
--ioapic->rtc_status.pending_eoi;
-
- WARN_ON(ioapic->rtc_status.pending_eoi < 0);
+ rtc_status_pending_eoi_check_valid(ioapic);
+ }
}
static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
ioapic->irr &= ~(1 << irq);
if (irq == RTC_GSI && line_status) {
+ /*
+ * pending_eoi cannot ever become negative (see
+ * rtc_status_pending_eoi_check_valid) and the caller
+ * ensures that it is only called if it is zero, namely
+ * if rtc_irq_check_coalesced returns false.
+ */
BUG_ON(ioapic->rtc_status.pending_eoi != 0);
ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
ioapic->rtc_status.dest_map);
- ioapic->rtc_status.pending_eoi = ret;
+ ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
} else
ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
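
The shape of this fix, rebuilding a derived counter from authoritative state when it is caught out of range rather than only warning about it, generalizes well. A minimal userspace sketch with purely illustrative names:

#include <stdio.h>

struct tracker {
	int pending;		/* derived count; can drift if a bug slips in */
	unsigned long truth;	/* bitmap standing in for the real state */
};

/* Rebuild the derived counter from the authoritative bitmap. */
static void tracker_resync(struct tracker *t)
{
	t->pending = __builtin_popcountl(t->truth);
}

/* Analogue of rtc_status_pending_eoi_check_valid(): complain once,
 * then recover instead of limping along with a bad counter. */
static void tracker_check_valid(struct tracker *t)
{
	if (t->pending < 0) {
		fprintf(stderr, "derived count drifted, resyncing\n");
		tracker_resync(t);
	}
}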