/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
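/* debugfs support for the i915 driver: most functions below each back one
 * read-only (or, for a few, writable) file under <debugfs>/dri/<minor>/. */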
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
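/* i915_capabilities: dump the device generation, PCH type and each
 * intel_device_info feature flag as yes/no. */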
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}
static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}
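/* describe_obj: print a one-line summary of a GEM object -- pin/tiling/global
 * flags, size, read/write domains, last seqnos, cache level and every VMA
 * binding -- shared by most of the object list dumpers below. */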
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			pin_count++;
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
{
	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_puts(m, "   ");
		describe_obj(m, vma->obj);
		seq_putc(m, '\n');
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
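/* count_objects (and count_vmas below) accumulate into size, count,
 * mappable_size and mappable_count variables that the calling function is
 * expected to declare in its own scope. */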
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}
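/* Same accumulation pattern as count_objects, but walking a list of VMAs
 * rather than objects. */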
#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
			   task ? task->comm : "<unknown>",
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.unbound);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(i) {
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IMR(i)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IIR(i)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IER(i)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
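/* i915_error_state: reading returns the last captured GPU error state,
 * formatted as text; writing anything to the file clears the captured
 * state (see i915_error_state_write below). */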
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}
static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}
static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
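/* i915_next_seqno: read (or, via write, force) the seqno that will be
 * assigned to the next request, e.g. to exercise seqno wraparound. */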
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	crstanddelay = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(dev))
			reqf >>= 24;
		else
			reqf >>= 25;
		reqf *= GT_FREQUENCY_MULTIPLIER;

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts, val;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		val = valleyview_rps_max_freq(dev_priv);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, val));

		val = valleyview_rps_min_freq(dev_priv);
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, val));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
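/* DRPC (render standby / RC-state) reporting: the three platform-specific
 * helpers below are dispatched from i915_drpc_info(). */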
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1;
	unsigned fw_rendercount = 0, fw_mediacount = 0;

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (I915_READ(VLV_GTLC_PW_STATUS) &
		    VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (I915_READ(VLV_GTLC_PW_STATUS) &
		    VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	spin_lock_irq(&dev_priv->uncore.lock);
	fw_rendercount = dev_priv->uncore.fw_rendercount;
	fw_mediacount = dev_priv->uncore.fw_mediacount;
	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
	seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);

	return 0;
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));

	return 0;
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev))
		return vlv_drpc_info(m);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;

#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i)
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ", ring->name);

		describe_obj(m, ctx->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
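/* Forcewake reference counts: Valleyview tracks the render and media wells
 * separately, everything else keeps a single count. */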
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;

	spin_lock_irq(&dev_priv->uncore.lock);
	if (IS_VALLEYVIEW(dev)) {
		fw_rendercount = dev_priv->uncore.fw_rendercount;
		fw_mediacount = dev_priv->uncore.fw_mediacount;
	} else
		forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
		seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
	} else
		seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (IS_GEN8(dev))
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_hw_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);

	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int unused, i;

	if (!ppgtt)
		return;

	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
	for_each_ring(ring, dev_priv, unused) {
		seq_printf(m, "%s\n", ring->name);
		for (i = 0; i < 4; i++) {
			u32 offset = 0x270 + i * 8;
			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
			pdp <<= 32;
			pdp |= I915_READ(ring->mmio_base + offset);
			/* print each PDP once; an inner loop reusing i here
			 * would clobber the outer iterator */
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_file *file;
	int i;

	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);

		ppgtt->debug_dump(ppgtt, m);
	} else
		return;

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_hw_ppgtt *pvt_ppgtt;

		pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
		seq_printf(m, "proc: %s\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
		seq_puts(m, "  default context:\n");
		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	int ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_info(m, dev);
	else if (INTEL_INFO(dev)->gen >= 6)
		gen6_ppgtt_info(m, dev);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_VALLEYVIEW(dev)) {
		seq_puts(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0)));
	seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1)));

	seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0)));
	seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1)));

	seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0)));
	seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1)));

	seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0)));
	seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1)));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0));

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
	bool enabled = false;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));

	enabled = HAS_PSR(dev) &&
		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
	seq_printf(m, "Enabled: %s\n", yesno(enabled));

	if (HAS_PSR(dev))
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance_Counter: %u\n", psrperf);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {

		if (connector->base.dpms != DRM_MODE_DPMS_ON)
			continue;

		if (!connector->base.encoder)
			continue;

		encoder = to_intel_encoder(connector->base.encoder);
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}
static int i915_pc8_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_HASWELL(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	mutex_lock(&dev_priv->pc8.lock);
	seq_printf(m, "Requirements met: %s\n",
		   yesno(dev_priv->pc8.requirements_met));
	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
	seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(dev_priv->pc8.irqs_disabled));
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
	mutex_unlock(&dev_priv->pc8.lock);

	return 0;
}
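/* Human-readable names for the display power domains reported by
 * i915_power_domain_info() below. */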
static const char *power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_INIT:
		return "INIT";
	default:
		WARN_ON(1);
		return "?";
	}
}
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				   power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, drm_get_encoder_name(encoder));
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   drm_get_connector_name(connector),
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;

	seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
		   crtc->fb->base.id, crtc->x, crtc->y,
		   crtc->fb->width, crtc->fb->height);
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
		   "no");
	if (intel_encoder->type == INTEL_OUTPUT_EDP)
		intel_panel_info(m, &intel_connector->panel);
}
static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
		   "no");
}
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, drm_get_connector_name(connector),
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}
	if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
	    intel_encoder->type == INTEL_OUTPUT_EDP)
		intel_dp_info(m, intel_connector);
	else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
		intel_hdmi_info(m, intel_connector);
	else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
		intel_lvds_info(m, intel_connector);

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;

	drm_modeset_lock_all(dev);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
			   crtc->base.id, pipe_name(intel_crtc->pipe),
			   intel_crtc->active ? "yes" : "no");
		if (intel_crtc->active)
			intel_crtc_info(m, intel_crtc);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_connector_info(m, connector);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for '\0' */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
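/*
 * Each line produced by i915_pipe_crc_read() below is thus an 8-char frame
 * counter followed by five 8-char CRC result fields, e.g. (values here are
 * illustrative only):
 *
 *     4919 89aefd01 0000cafe 12345678 00000000 deadbeef
 */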
static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);

	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
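/*
 * Reads block until at least one CRC entry is available; with O_NONBLOCK
 * the read returns -EAGAIN instead of sleeping.
 */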
static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int head, tail, n_entries, n;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	head = pipe_crc->head;
	tail = pipe_crc->tail;
	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
			count / PIPE_CRC_LINE_LEN);
	spin_unlock_irq(&pipe_crc->lock);

	bytes_read = 0;
	n = 0;
	do {
		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
		int ret;

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);

		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
				   buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		n++;
	} while (--n_entries);

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->tail = tail;
	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}
static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};
static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{ .name = "i915_pipe_A_crc", .pipe = PIPE_A },
	{ .name = "i915_pipe_B_crc", .pipe = PIPE_B },
	{ .name = "i915_pipe_C_crc", .pipe = PIPE_C },
};
static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}
static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);

	return pipe_crc_sources[source];
}
static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}

static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_VALLEYVIEW(dev)); /* this path is VLV-only */

		tmp |= DC_BALANCE_RESET_VLV;
		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}
static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(&source, &val);

	if (ret != 0)
		return ret;

	/* none -> real source transition */
	if (source) {
		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
					    INTEL_PIPE_CRC_ENTRIES_NR,
					    GFP_KERNEL);
		if (!pipe_crc->entries)
			return -ENOMEM;

		spin_lock_irq(&pipe_crc->lock);
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		intel_wait_for_vblank(dev, pipe);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
	}

	return 0;
}
/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}
enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};
static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}
static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}
static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}
static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};
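/*
 * Watermark latency files: one "WMn <raw> (<usec>)" line per level. Raw
 * WM1+ values are kept in 0.5us units and scaled to usec for display.
 */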
static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
{
	struct drm_device *dev = m->private;
	int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
	int level;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/* WM1+ latency values in 0.5us units */
		if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level],
			   latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;

	wm_latency_show(m, to_i915(dev)->wm.pri_latency);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;

	wm_latency_show(m, to_i915(dev)->wm.spr_latency);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;

	wm_latency_show(m, to_i915(dev)->wm.cur_latency);

	return 0;
}
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (!HAS_PCH_SPLIT(dev))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (!HAS_PCH_SPLIT(dev))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (!HAS_PCH_SPLIT(dev))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[5])
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	uint16_t new[5] = { 0 };
	int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
	int level;
	int ret;
	char tmp[32];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
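/*
 * Writing expects all levels at once as space-separated integers, e.g.
 * "echo 200 500 1300 2000 4000 > i915_pri_wm_latency" on HSW/BDW (values
 * illustrative; 4 values on the other PCH-split platforms).
 */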
static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;

	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;

	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;

	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
}
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
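/*
 * i915_wedged: reading returns the current reset counter; writing feeds the
 * value to i915_handle_error(), injecting a hang to exercise error capture
 * and reset handling.
 */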
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;

	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
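/*
 * i915_gem_drop_caches accepts a bitmask of the DROP_* flags below; e.g.
 * writing 0xf (DROP_ALL) idles the GPU and drops every droppable bound and
 * unbound object.
 */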
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	struct i915_address_space *vm;
	struct i915_vma *vma, *x;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			list_for_each_entry_safe(vma, x, &vm->inactive_list,
						 mm_list) {
				if (vma->pin_count)
					continue;

				ret = i915_vma_unbind(vma);
				if (ret)
					goto unlock;
			}
		}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
					 global_list)
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
	}

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
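/*
 * i915_max_freq/i915_min_freq clamp the RPS turbo range. Values are in MHz
 * and are translated to hardware units via GT_FREQUENCY_MULTIPLIER (or
 * vlv_freq_opcode() on Valleyview).
 */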
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
	else
		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_delay = val;

	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
	else
		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_delay = val;

	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
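/*
 * Holding i915_forcewake_user open keeps a GT forcewake and runtime PM
 * reference, so GT registers stay awake for inspection while debugging.
 */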
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}
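/*
 * Plain seq_file entries; everything needing a custom fops is listed in
 * i915_debugfs_files[] further down.
 */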
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_pc8_status", i915_pc8_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_display_info", i915_display_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
};
void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}