git.kernelconcepts.de Git: karo-tx-linux.git / blob / drivers/gpu/drm/radeon/kv_dpm.c
drm/radeon: signedness bug in kv_dpm.c
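The hunk that fixes the bug is not part of the excerpt below; as a minimal, hypothetical sketch of the bug class the subject line refers to (illustrative only, not the actual patch), an unsigned loop counter makes a ">= 0" bound check always true, so a downward-counting loop can never terminate through its condition:

	/* Hypothetical helper, not taken from kv_dpm.c: return the highest set bit, or -1. */
	static int find_highest_set_bit(u32 mask)
	{
		int i;	/* must be signed: with "u32 i" the "i >= 0" test below never becomes false */

		for (i = 31; i >= 0; i--) {
			if (mask & (1u << i))
				return i;
		}
		return -1;	/* with an unsigned counter this would be unreachable; i would wrap instead */
	}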
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include "drmP.h"
25 #include "radeon.h"
26 #include "cikd.h"
27 #include "r600_dpm.h"
28 #include "kv_dpm.h"
29 #include "radeon_asic.h"
30 #include <linux/seq_file.h>
31
32 #define KV_MAX_DEEPSLEEP_DIVIDER_ID     5
33 #define KV_MINIMUM_ENGINE_CLOCK         800
34 #define SMC_RAM_END                     0x40000
35
36 static void kv_init_graphics_levels(struct radeon_device *rdev);
37 static int kv_calculate_ds_divider(struct radeon_device *rdev);
38 static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
39 static int kv_calculate_dpm_settings(struct radeon_device *rdev);
40 static void kv_enable_new_levels(struct radeon_device *rdev);
41 static void kv_program_nbps_index_settings(struct radeon_device *rdev,
42                                            struct radeon_ps *new_rps);
43 static int kv_set_enabled_levels(struct radeon_device *rdev);
44 static int kv_force_dpm_highest(struct radeon_device *rdev);
45 static int kv_force_dpm_lowest(struct radeon_device *rdev);
46 static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
47                                         struct radeon_ps *new_rps,
48                                         struct radeon_ps *old_rps);
49 static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
50                                             int min_temp, int max_temp);
51 static int kv_init_fps_limits(struct radeon_device *rdev);
52
53 void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
54 static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
55 static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
56 static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);
57
58 extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
59 extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
60 extern void cik_update_cg(struct radeon_device *rdev,
61                           u32 block, bool enable);
62
63 static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
64 {
65         {  0,       4,        1    },
66         {  1,       4,        1    },
67         {  2,       5,        1    },
68         {  3,       4,        2    },
69         {  4,       1,        1    },
70         {  5,       5,        2    },
71         {  6,       6,        1    },
72         {  7,       9,        2    },
73         { 0xffffffff }
74 };
75
76 static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
77 {
78         {  0,       4,        1    },
79         { 0xffffffff }
80 };
81
82 static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
83 {
84         {  0,       4,        1    },
85         { 0xffffffff }
86 };
87
88 static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
89 {
90         {  0,       4,        1    },
91         { 0xffffffff }
92 };
93
94 static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
95 {
96         {  0,       4,        1    },
97         { 0xffffffff }
98 };
99
100 static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
101 {
102         {  0,       4,        1    },
103         {  1,       4,        1    },
104         {  2,       5,        1    },
105         {  3,       4,        1    },
106         {  4,       1,        1    },
107         {  5,       5,        1    },
108         {  6,       6,        1    },
109         {  7,       9,        1    },
110         {  8,       4,        1    },
111         {  9,       2,        1    },
112         {  10,      3,        1    },
113         {  11,      6,        1    },
114         {  12,      8,        2    },
115         {  13,      1,        1    },
116         {  14,      2,        1    },
117         {  15,      3,        1    },
118         {  16,      1,        1    },
119         {  17,      4,        1    },
120         {  18,      3,        1    },
121         {  19,      1,        1    },
122         {  20,      8,        1    },
123         {  21,      5,        1    },
124         {  22,      1,        1    },
125         {  23,      1,        1    },
126         {  24,      4,        1    },
127         {  27,      6,        1    },
128         {  28,      1,        1    },
129         { 0xffffffff }
130 };
131
132 static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
133 {
134         { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
135 };
136
137 static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
138 {
139         { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
140 };
141
142 static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
143 {
144         { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
145 };
146
147 static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
148 {
149         { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
150 };
151
152 static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
153 {
154         { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
155 };
156
157 static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
158 {
159         { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
160 };
161
162 static const struct kv_pt_config_reg didt_config_kv[] =
163 {
164         { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
165         { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
166         { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
167         { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
168         { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
169         { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
170         { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
171         { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
172         { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
173         { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
174         { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
175         { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
176         { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
177         { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
178         { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
179         { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
180         { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
181         { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
182         { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
183         { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
184         { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
185         { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
186         { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
187         { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
188         { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
189         { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
190         { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
191         { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
192         { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
193         { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
194         { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
195         { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
196         { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
197         { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
198         { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
199         { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
200         { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
201         { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
202         { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
203         { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
204         { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
205         { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
206         { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
207         { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
208         { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
209         { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
210         { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
211         { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
212         { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
213         { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
214         { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
215         { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
216         { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
217         { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
218         { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
219         { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
220         { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
221         { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
222         { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
223         { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
224         { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
225         { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
226         { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
227         { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
228         { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
229         { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
230         { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
231         { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
232         { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
233         { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
234         { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
235         { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
236         { 0xFFFFFFFF }
237 };
238
239 static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
240 {
241         struct kv_ps *ps = rps->ps_priv;
242
243         return ps;
244 }
245
246 static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
247 {
248         struct kv_power_info *pi = rdev->pm.dpm.priv;
249
250         return pi;
251 }
252
253 #if 0
254 static void kv_program_local_cac_table(struct radeon_device *rdev,
255                                        const struct kv_lcac_config_values *local_cac_table,
256                                        const struct kv_lcac_config_reg *local_cac_reg)
257 {
258         u32 i, count, data;
259         const struct kv_lcac_config_values *values = local_cac_table;
260
261         while (values->block_id != 0xffffffff) {
262                 count = values->signal_id;
263                 for (i = 0; i < count; i++) {
264                         data = ((values->block_id << local_cac_reg->block_shift) &
265                                 local_cac_reg->block_mask);
266                         data |= ((i << local_cac_reg->signal_shift) &
267                                  local_cac_reg->signal_mask);
268                         data |= ((values->t << local_cac_reg->t_shift) &
269                                  local_cac_reg->t_mask);
270                         data |= ((1 << local_cac_reg->enable_shift) &
271                                  local_cac_reg->enable_mask);
272                         WREG32_SMC(local_cac_reg->cntl, data);
273                 }
274                 values++;
275         }
276 }
277 #endif
278
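/*
 * Walk a 0xFFFFFFFF-terminated table of power-tune register updates.
 * KV_CONFIGREG_CACHE entries are accumulated into "cache" and OR'd into
 * the next real write; the other types are read-modify-write through the
 * SMC, DIDT or plain MMIO register space.
 */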
279 static int kv_program_pt_config_registers(struct radeon_device *rdev,
280                                           const struct kv_pt_config_reg *cac_config_regs)
281 {
282         const struct kv_pt_config_reg *config_regs = cac_config_regs;
283         u32 data;
284         u32 cache = 0;
285
286         if (config_regs == NULL)
287                 return -EINVAL;
288
289         while (config_regs->offset != 0xFFFFFFFF) {
290                 if (config_regs->type == KV_CONFIGREG_CACHE) {
291                         cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
292                 } else {
293                         switch (config_regs->type) {
294                         case KV_CONFIGREG_SMC_IND:
295                                 data = RREG32_SMC(config_regs->offset);
296                                 break;
297                         case KV_CONFIGREG_DIDT_IND:
298                                 data = RREG32_DIDT(config_regs->offset);
299                                 break;
300                         default:
301                                 data = RREG32(config_regs->offset << 2);
302                                 break;
303                         }
304
305                         data &= ~config_regs->mask;
306                         data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
307                         data |= cache;
308                         cache = 0;
309
310                         switch (config_regs->type) {
311                         case KV_CONFIGREG_SMC_IND:
312                                 WREG32_SMC(config_regs->offset, data);
313                                 break;
314                         case KV_CONFIGREG_DIDT_IND:
315                                 WREG32_DIDT(config_regs->offset, data);
316                                 break;
317                         default:
318                                 WREG32(config_regs->offset << 2, data);
319                                 break;
320                         }
321                 }
322                 config_regs++;
323         }
324
325         return 0;
326 }
327
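/* Set or clear DIDT_CTRL_EN for each ramping block (SQ/DB/TD/TCP) the ASIC supports. */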
328 static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
329 {
330         struct kv_power_info *pi = kv_get_pi(rdev);
331         u32 data;
332
333         if (pi->caps_sq_ramping) {
334                 data = RREG32_DIDT(DIDT_SQ_CTRL0);
335                 if (enable)
336                         data |= DIDT_CTRL_EN;
337                 else
338                         data &= ~DIDT_CTRL_EN;
339                 WREG32_DIDT(DIDT_SQ_CTRL0, data);
340         }
341
342         if (pi->caps_db_ramping) {
343                 data = RREG32_DIDT(DIDT_DB_CTRL0);
344                 if (enable)
345                         data |= DIDT_CTRL_EN;
346                 else
347                         data &= ~DIDT_CTRL_EN;
348                 WREG32_DIDT(DIDT_DB_CTRL0, data);
349         }
350
351         if (pi->caps_td_ramping) {
352                 data = RREG32_DIDT(DIDT_TD_CTRL0);
353                 if (enable)
354                         data |= DIDT_CTRL_EN;
355                 else
356                         data &= ~DIDT_CTRL_EN;
357                 WREG32_DIDT(DIDT_TD_CTRL0, data);
358         }
359
360         if (pi->caps_tcp_ramping) {
361                 data = RREG32_DIDT(DIDT_TCP_CTRL0);
362                 if (enable)
363                         data |= DIDT_CTRL_EN;
364                 else
365                         data &= ~DIDT_CTRL_EN;
366                 WREG32_DIDT(DIDT_TCP_CTRL0, data);
367         }
368 }
369
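/*
 * Enable or disable DIDT (di/dt) throttling.  The DIDT registers are only
 * touched inside RLC safe mode, and the didt_config_kv table is programmed
 * before enabling.
 */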
370 static int kv_enable_didt(struct radeon_device *rdev, bool enable)
371 {
372         struct kv_power_info *pi = kv_get_pi(rdev);
373         int ret;
374
375         if (pi->caps_sq_ramping ||
376             pi->caps_db_ramping ||
377             pi->caps_td_ramping ||
378             pi->caps_tcp_ramping) {
379                 cik_enter_rlc_safe_mode(rdev);
380
381                 if (enable) {
382                         ret = kv_program_pt_config_registers(rdev, didt_config_kv);
383                         if (ret) {
384                                 cik_exit_rlc_safe_mode(rdev);
385                                 return ret;
386                         }
387                 }
388
389                 kv_do_enable_didt(rdev, enable);
390
391                 cik_exit_rlc_safe_mode(rdev);
392         }
393
394         return 0;
395 }
396
397 #if 0
398 static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
399 {
400         struct kv_power_info *pi = kv_get_pi(rdev);
401
402         if (pi->caps_cac) {
403                 WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
404                 WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
405                 kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
406
407                 WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
408                 WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
409                 kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
410
411                 WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
412                 WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
413                 kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
414
415                 WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
416                 WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
417                 kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
418
419                 WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
420                 WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
421                 kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
422
423                 WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
424                 WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
425                 kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
426         }
427 }
428 #endif
429
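/* Ask the SMU to enable or disable CAC and track the result in pi->cac_enabled. */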
430 static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
431 {
432         struct kv_power_info *pi = kv_get_pi(rdev);
433         int ret = 0;
434
435         if (pi->caps_cac) {
436                 if (enable) {
437                         ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
438                         if (ret)
439                                 pi->cac_enabled = false;
440                         else
441                                 pi->cac_enabled = true;
442                 } else if (pi->cac_enabled) {
443                         kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
444                         pi->cac_enabled = false;
445                 }
446         }
447
448         return ret;
449 }
450
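/*
 * Pull the DPM table and soft-register offsets out of the SMU7 firmware
 * header in SMC SRAM; the later table uploads are relative to these offsets.
 */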
451 static int kv_process_firmware_header(struct radeon_device *rdev)
452 {
453         struct kv_power_info *pi = kv_get_pi(rdev);
454         u32 tmp;
455         int ret;
456
457         ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
458                                      offsetof(SMU7_Firmware_Header, DpmTable),
459                                      &tmp, pi->sram_end);
460
461         if (ret == 0)
462                 pi->dpm_table_start = tmp;
463
464         ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
465                                      offsetof(SMU7_Firmware_Header, SoftRegisters),
466                                      &tmp, pi->sram_end);
467
468         if (ret == 0)
469                 pi->soft_regs_start = tmp;
470
471         return ret;
472 }
473
474 static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
475 {
476         struct kv_power_info *pi = kv_get_pi(rdev);
477         int ret;
478
479         pi->graphics_voltage_change_enable = 1;
480
481         ret = kv_copy_bytes_to_smc(rdev,
482                                    pi->dpm_table_start +
483                                    offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
484                                    &pi->graphics_voltage_change_enable,
485                                    sizeof(u8), pi->sram_end);
486
487         return ret;
488 }
489
490 static int kv_set_dpm_interval(struct radeon_device *rdev)
491 {
492         struct kv_power_info *pi = kv_get_pi(rdev);
493         int ret;
494
495         pi->graphics_interval = 1;
496
497         ret = kv_copy_bytes_to_smc(rdev,
498                                    pi->dpm_table_start +
499                                    offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
500                                    &pi->graphics_interval,
501                                    sizeof(u8), pi->sram_end);
502
503         return ret;
504 }
505
506 static int kv_set_dpm_boot_state(struct radeon_device *rdev)
507 {
508         struct kv_power_info *pi = kv_get_pi(rdev);
509         int ret;
510
511         ret = kv_copy_bytes_to_smc(rdev,
512                                    pi->dpm_table_start +
513                                    offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
514                                    &pi->graphics_boot_level,
515                                    sizeof(u8), pi->sram_end);
516
517         return ret;
518 }
519
520 static void kv_program_vc(struct radeon_device *rdev)
521 {
522         WREG32_SMC(CG_FTV_0, 0x3FFFC000);
523 }
524
525 static void kv_clear_vc(struct radeon_device *rdev)
526 {
527         WREG32_SMC(CG_FTV_0, 0);
528 }
529
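/* Look up the engine PLL post divider for "sclk" and store it, along with the clock, in the graphics level. */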
530 static int kv_set_divider_value(struct radeon_device *rdev,
531                                 u32 index, u32 sclk)
532 {
533         struct kv_power_info *pi = kv_get_pi(rdev);
534         struct atom_clock_dividers dividers;
535         int ret;
536
537         ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
538                                              sclk, false, &dividers);
539         if (ret)
540                 return ret;
541
542         pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
543         pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
544
545         return 0;
546 }
547
548 static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
549                                             u16 voltage)
550 {
551         return 6200 - (voltage * 25);
552 }
553
554 static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
555                                             u32 vid_2bit)
556 {
557         struct kv_power_info *pi = kv_get_pi(rdev);
558         u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
559                                                  &pi->sys_info.vid_mapping_table,
560                                                  vid_2bit);
561
562         return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
563 }
564
565
566 static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
567 {
568         struct kv_power_info *pi = kv_get_pi(rdev);
569
570         pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
571         pi->graphics_level[index].MinVddNb =
572                 cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));
573
574         return 0;
575 }
576
577 static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
578 {
579         struct kv_power_info *pi = kv_get_pi(rdev);
580
581         pi->graphics_level[index].AT = cpu_to_be16((u16)at);
582
583         return 0;
584 }
585
586 static void kv_dpm_power_level_enable(struct radeon_device *rdev,
587                                       u32 index, bool enable)
588 {
589         struct kv_power_info *pi = kv_get_pi(rdev);
590
591         pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
592 }
593
594 static void kv_start_dpm(struct radeon_device *rdev)
595 {
596         u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
597
598         tmp |= GLOBAL_PWRMGT_EN;
599         WREG32_SMC(GENERAL_PWRMGT, tmp);
600
601         kv_smc_dpm_enable(rdev, true);
602 }
603
604 static void kv_stop_dpm(struct radeon_device *rdev)
605 {
606         kv_smc_dpm_enable(rdev, false);
607 }
608
609 static void kv_start_am(struct radeon_device *rdev)
610 {
611         u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
612
613         sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
614         sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;
615
616         WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
617 }
618
619 static void kv_reset_am(struct radeon_device *rdev)
620 {
621         u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
622
623         sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
624
625         WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
626 }
627
628 static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
629 {
630         return kv_notify_message_to_smu(rdev, freeze ?
631                                         PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
632 }
633
634 static int kv_force_lowest_valid(struct radeon_device *rdev)
635 {
636         return kv_force_dpm_lowest(rdev);
637 }
638
639 static int kv_unforce_levels(struct radeon_device *rdev)
640 {
641         return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
642 }
643
644 static int kv_update_sclk_t(struct radeon_device *rdev)
645 {
646         struct kv_power_info *pi = kv_get_pi(rdev);
647         u32 low_sclk_interrupt_t = 0;
648         int ret = 0;
649
650         if (pi->caps_sclk_throttle_low_notification) {
651                 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
652
653                 ret = kv_copy_bytes_to_smc(rdev,
654                                            pi->dpm_table_start +
655                                            offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
656                                            (u8 *)&low_sclk_interrupt_t,
657                                            sizeof(u32), pi->sram_end);
658         }
659         return ret;
660 }
661
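/*
 * Find the graphics DPM level whose sclk matches the bootup power level
 * (falling back to level 0) and enable it as the boot level.
 */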
662 static int kv_program_bootup_state(struct radeon_device *rdev)
663 {
664         struct kv_power_info *pi = kv_get_pi(rdev);
665         u32 i;
666         struct radeon_clock_voltage_dependency_table *table =
667                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
668
669         if (table && table->count) {
670                 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
671                         if (table->entries[i].clk == pi->boot_pl.sclk)
672                                 break;
673                 }
674
675                 pi->graphics_boot_level = (u8)i;
676                 kv_dpm_power_level_enable(rdev, i, true);
677         } else {
678                 struct sumo_sclk_voltage_mapping_table *table =
679                         &pi->sys_info.sclk_voltage_mapping_table;
680
681                 if (table->num_max_dpm_entries == 0)
682                         return -EINVAL;
683
684                 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
685                         if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
686                                 break;
687                 }
688
689                 pi->graphics_boot_level = (u8)i;
690                 kv_dpm_power_level_enable(rdev, i, true);
691         }
692         return 0;
693 }
694
695 static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
696 {
697         struct kv_power_info *pi = kv_get_pi(rdev);
698         int ret;
699
700         pi->graphics_therm_throttle_enable = 1;
701
702         ret = kv_copy_bytes_to_smc(rdev,
703                                    pi->dpm_table_start +
704                                    offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
705                                    &pi->graphics_therm_throttle_enable,
706                                    sizeof(u8), pi->sram_end);
707
708         return ret;
709 }
710
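/* Copy the graphics level array and level count into the SMC DPM table. */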
711 static int kv_upload_dpm_settings(struct radeon_device *rdev)
712 {
713         struct kv_power_info *pi = kv_get_pi(rdev);
714         int ret;
715
716         ret = kv_copy_bytes_to_smc(rdev,
717                                    pi->dpm_table_start +
718                                    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
719                                    (u8 *)&pi->graphics_level,
720                                    sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
721                                    pi->sram_end);
722
723         if (ret)
724                 return ret;
725
726         ret = kv_copy_bytes_to_smc(rdev,
727                                    pi->dpm_table_start +
728                                    offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
729                                    &pi->graphics_dpm_level_count,
730                                    sizeof(u8), pi->sram_end);
731
732         return ret;
733 }
734
735 static u32 kv_get_clock_difference(u32 a, u32 b)
736 {
737         return (a >= b) ? a - b : b - a;
738 }
739
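/*
 * Map a clock that lands within 200 of one of the known bypass frequencies
 * (40000, 30000, 20000, 15000, 10000) to its DFS bypass divider code;
 * 0 means no bypass.
 */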
740 static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
741 {
742         struct kv_power_info *pi = kv_get_pi(rdev);
743         u32 value;
744
745         if (pi->caps_enable_dfs_bypass) {
746                 if (kv_get_clock_difference(clk, 40000) < 200)
747                         value = 3;
748                 else if (kv_get_clock_difference(clk, 30000) < 200)
749                         value = 2;
750                 else if (kv_get_clock_difference(clk, 20000) < 200)
751                         value = 7;
752                 else if (kv_get_clock_difference(clk, 15000) < 200)
753                         value = 6;
754                 else if (kv_get_clock_difference(clk, 10000) < 200)
755                         value = 8;
756                 else
757                         value = 0;
758         } else {
759                 value = 0;
760         }
761
762         return value;
763 }
764
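/*
 * Build the UVD level array from the UVD clock/voltage dependency table
 * (stopping at entries above the high-voltage threshold) and upload the
 * level count, interval and levels to the SMC.
 */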
765 static int kv_populate_uvd_table(struct radeon_device *rdev)
766 {
767         struct kv_power_info *pi = kv_get_pi(rdev);
768         struct radeon_uvd_clock_voltage_dependency_table *table =
769                 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
770         struct atom_clock_dividers dividers;
771         int ret;
772         u32 i;
773
774         if (table == NULL || table->count == 0)
775                 return 0;
776
777         pi->uvd_level_count = 0;
778         for (i = 0; i < table->count; i++) {
779                 if (pi->high_voltage_t &&
780                     (pi->high_voltage_t < table->entries[i].v))
781                         break;
782
783                 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
784                 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
785                 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
786
787                 pi->uvd_level[i].VClkBypassCntl =
788                         (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
789                 pi->uvd_level[i].DClkBypassCntl =
790                         (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);
791
792                 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
793                                                      table->entries[i].vclk, false, &dividers);
794                 if (ret)
795                         return ret;
796                 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
797
798                 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
799                                                      table->entries[i].dclk, false, &dividers);
800                 if (ret)
801                         return ret;
802                 pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
803
804                 pi->uvd_level_count++;
805         }
806
807         ret = kv_copy_bytes_to_smc(rdev,
808                                    pi->dpm_table_start +
809                                    offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
810                                    (u8 *)&pi->uvd_level_count,
811                                    sizeof(u8), pi->sram_end);
812         if (ret)
813                 return ret;
814
815         pi->uvd_interval = 1;
816
817         ret = kv_copy_bytes_to_smc(rdev,
818                                    pi->dpm_table_start +
819                                    offsetof(SMU7_Fusion_DpmTable, UVDInterval),
820                                    &pi->uvd_interval,
821                                    sizeof(u8), pi->sram_end);
822         if (ret)
823                 return ret;
824
825         ret = kv_copy_bytes_to_smc(rdev,
826                                    pi->dpm_table_start +
827                                    offsetof(SMU7_Fusion_DpmTable, UvdLevel),
828                                    (u8 *)&pi->uvd_level,
829                                    sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
830                                    pi->sram_end);
831
832         return ret;
833
834 }
835
836 static int kv_populate_vce_table(struct radeon_device *rdev)
837 {
838         struct kv_power_info *pi = kv_get_pi(rdev);
839         int ret;
840         u32 i;
841         struct radeon_vce_clock_voltage_dependency_table *table =
842                 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
843         struct atom_clock_dividers dividers;
844
845         if (table == NULL || table->count == 0)
846                 return 0;
847
848         pi->vce_level_count = 0;
849         for (i = 0; i < table->count; i++) {
850                 if (pi->high_voltage_t &&
851                     pi->high_voltage_t < table->entries[i].v)
852                         break;
853
854                 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
855                 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
856
857                 pi->vce_level[i].ClkBypassCntl =
858                         (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);
859
860                 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
861                                                      table->entries[i].evclk, false, &dividers);
862                 if (ret)
863                         return ret;
864                 pi->vce_level[i].Divider = (u8)dividers.post_div;
865
866                 pi->vce_level_count++;
867         }
868
869         ret = kv_copy_bytes_to_smc(rdev,
870                                    pi->dpm_table_start +
871                                    offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
872                                    (u8 *)&pi->vce_level_count,
873                                    sizeof(u8),
874                                    pi->sram_end);
875         if (ret)
876                 return ret;
877
878         pi->vce_interval = 1;
879
880         ret = kv_copy_bytes_to_smc(rdev,
881                                    pi->dpm_table_start +
882                                    offsetof(SMU7_Fusion_DpmTable, VCEInterval),
883                                    (u8 *)&pi->vce_interval,
884                                    sizeof(u8),
885                                    pi->sram_end);
886         if (ret)
887                 return ret;
888
889         ret = kv_copy_bytes_to_smc(rdev,
890                                    pi->dpm_table_start +
891                                    offsetof(SMU7_Fusion_DpmTable, VceLevel),
892                                    (u8 *)&pi->vce_level,
893                                    sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
894                                    pi->sram_end);
895
896         return ret;
897 }
898
899 static int kv_populate_samu_table(struct radeon_device *rdev)
900 {
901         struct kv_power_info *pi = kv_get_pi(rdev);
902         struct radeon_clock_voltage_dependency_table *table =
903                 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
904         struct atom_clock_dividers dividers;
905         int ret;
906         u32 i;
907
908         if (table == NULL || table->count == 0)
909                 return 0;
910
911         pi->samu_level_count = 0;
912         for (i = 0; i < table->count; i++) {
913                 if (pi->high_voltage_t &&
914                     pi->high_voltage_t < table->entries[i].v)
915                         break;
916
917                 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
918                 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
919
920                 pi->samu_level[i].ClkBypassCntl =
921                         (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);
922
923                 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
924                                                      table->entries[i].clk, false, &dividers);
925                 if (ret)
926                         return ret;
927                 pi->samu_level[i].Divider = (u8)dividers.post_div;
928
929                 pi->samu_level_count++;
930         }
931
932         ret = kv_copy_bytes_to_smc(rdev,
933                                    pi->dpm_table_start +
934                                    offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
935                                    (u8 *)&pi->samu_level_count,
936                                    sizeof(u8),
937                                    pi->sram_end);
938         if (ret)
939                 return ret;
940
941         pi->samu_interval = 1;
942
943         ret = kv_copy_bytes_to_smc(rdev,
944                                    pi->dpm_table_start +
945                                    offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
946                                    (u8 *)&pi->samu_interval,
947                                    sizeof(u8),
948                                    pi->sram_end);
949         if (ret)
950                 return ret;
951
952         ret = kv_copy_bytes_to_smc(rdev,
953                                    pi->dpm_table_start +
954                                    offsetof(SMU7_Fusion_DpmTable, SamuLevel),
955                                    (u8 *)&pi->samu_level,
956                                    sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
957                                    pi->sram_end);
958         if (ret)
959                 return ret;
960
961         return ret;
962 }
963
964
965 static int kv_populate_acp_table(struct radeon_device *rdev)
966 {
967         struct kv_power_info *pi = kv_get_pi(rdev);
968         struct radeon_clock_voltage_dependency_table *table =
969                 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
970         struct atom_clock_dividers dividers;
971         int ret;
972         u32 i;
973
974         if (table == NULL || table->count == 0)
975                 return 0;
976
977         pi->acp_level_count = 0;
978         for (i = 0; i < table->count; i++) {
979                 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
980                 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
981
982                 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
983                                                      table->entries[i].clk, false, &dividers);
984                 if (ret)
985                         return ret;
986                 pi->acp_level[i].Divider = (u8)dividers.post_div;
987
988                 pi->acp_level_count++;
989         }
990
991         ret = kv_copy_bytes_to_smc(rdev,
992                                    pi->dpm_table_start +
993                                    offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
994                                    (u8 *)&pi->acp_level_count,
995                                    sizeof(u8),
996                                    pi->sram_end);
997         if (ret)
998                 return ret;
999
1000         pi->acp_interval = 1;
1001
1002         ret = kv_copy_bytes_to_smc(rdev,
1003                                    pi->dpm_table_start +
1004                                    offsetof(SMU7_Fusion_DpmTable, ACPInterval),
1005                                    (u8 *)&pi->acp_interval,
1006                                    sizeof(u8),
1007                                    pi->sram_end);
1008         if (ret)
1009                 return ret;
1010
1011         ret = kv_copy_bytes_to_smc(rdev,
1012                                    pi->dpm_table_start +
1013                                    offsetof(SMU7_Fusion_DpmTable, AcpLevel),
1014                                    (u8 *)&pi->acp_level,
1015                                    sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
1016                                    pi->sram_end);
1017         if (ret)
1018                 return ret;
1019
1020         return ret;
1021 }
1022
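/*
 * Assign a DFS bypass divider code to every graphics DPM level whose sclk
 * sits within 200 of a known bypass frequency, using either the vddc/sclk
 * dependency table or the sumo sclk/voltage mapping table.
 */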
1023 static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
1024 {
1025         struct kv_power_info *pi = kv_get_pi(rdev);
1026         u32 i;
1027         struct radeon_clock_voltage_dependency_table *table =
1028                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1029
1030         if (table && table->count) {
1031                 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1032                         if (pi->caps_enable_dfs_bypass) {
1033                                 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
1034                                         pi->graphics_level[i].ClkBypassCntl = 3;
1035                                 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
1036                                         pi->graphics_level[i].ClkBypassCntl = 2;
1037                                 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
1038                                         pi->graphics_level[i].ClkBypassCntl = 7;
1039                                 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
1040                                         pi->graphics_level[i].ClkBypassCntl = 6;
1041                                 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
1042                                         pi->graphics_level[i].ClkBypassCntl = 8;
1043                                 else
1044                                         pi->graphics_level[i].ClkBypassCntl = 0;
1045                         } else {
1046                                 pi->graphics_level[i].ClkBypassCntl = 0;
1047                         }
1048                 }
1049         } else {
1050                 struct sumo_sclk_voltage_mapping_table *table =
1051                         &pi->sys_info.sclk_voltage_mapping_table;
1052                 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1053                         if (pi->caps_enable_dfs_bypass) {
1054                                 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
1055                                         pi->graphics_level[i].ClkBypassCntl = 3;
1056                                 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
1057                                         pi->graphics_level[i].ClkBypassCntl = 2;
1058                                 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
1059                                         pi->graphics_level[i].ClkBypassCntl = 7;
1060                                 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
1061                                         pi->graphics_level[i].ClkBypassCntl = 6;
1062                                 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
1063                                         pi->graphics_level[i].ClkBypassCntl = 8;
1064                                 else
1065                                         pi->graphics_level[i].ClkBypassCntl = 0;
1066                         } else {
1067                                 pi->graphics_level[i].ClkBypassCntl = 0;
1068                         }
1069                 }
1070         }
1071 }
1072
1073 static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
1074 {
1075         return kv_notify_message_to_smu(rdev, enable ?
1076                                         PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
1077 }
1078
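/* Deep-copy the given state into pi->current_rps/current_ps. */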
1079 static void kv_update_current_ps(struct radeon_device *rdev,
1080                                  struct radeon_ps *rps)
1081 {
1082         struct kv_ps *new_ps = kv_get_ps(rps);
1083         struct kv_power_info *pi = kv_get_pi(rdev);
1084
1085         pi->current_rps = *rps;
1086         pi->current_ps = *new_ps;
1087         pi->current_rps.ps_priv = &pi->current_ps;
1088 }
1089
1090 static void kv_update_requested_ps(struct radeon_device *rdev,
1091                                    struct radeon_ps *rps)
1092 {
1093         struct kv_ps *new_ps = kv_get_ps(rps);
1094         struct kv_power_info *pi = kv_get_pi(rdev);
1095
1096         pi->requested_rps = *rps;
1097         pi->requested_ps = *new_ps;
1098         pi->requested_rps.ps_priv = &pi->requested_ps;
1099 }
1100
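/*
 * Full DPM bring-up: clock gating is dropped while the SMC tables are
 * programmed (firmware header, graphics/UVD/VCE/SAMU/ACP levels, thermal
 * range), then DPM, DIDT and CAC are enabled and currently unused IP
 * blocks are powergated.
 */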
1101 int kv_dpm_enable(struct radeon_device *rdev)
1102 {
1103         struct kv_power_info *pi = kv_get_pi(rdev);
1104         int ret;
1105
1106         cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1107                              RADEON_CG_BLOCK_SDMA |
1108                              RADEON_CG_BLOCK_BIF |
1109                              RADEON_CG_BLOCK_HDP), false);
1110
1111         ret = kv_process_firmware_header(rdev);
1112         if (ret) {
1113                 DRM_ERROR("kv_process_firmware_header failed\n");
1114                 return ret;
1115         }
1116         kv_init_fps_limits(rdev);
1117         kv_init_graphics_levels(rdev);
1118         ret = kv_program_bootup_state(rdev);
1119         if (ret) {
1120                 DRM_ERROR("kv_program_bootup_state failed\n");
1121                 return ret;
1122         }
1123         kv_calculate_dfs_bypass_settings(rdev);
1124         ret = kv_upload_dpm_settings(rdev);
1125         if (ret) {
1126                 DRM_ERROR("kv_upload_dpm_settings failed\n");
1127                 return ret;
1128         }
1129         ret = kv_populate_uvd_table(rdev);
1130         if (ret) {
1131                 DRM_ERROR("kv_populate_uvd_table failed\n");
1132                 return ret;
1133         }
1134         ret = kv_populate_vce_table(rdev);
1135         if (ret) {
1136                 DRM_ERROR("kv_populate_vce_table failed\n");
1137                 return ret;
1138         }
1139         ret = kv_populate_samu_table(rdev);
1140         if (ret) {
1141                 DRM_ERROR("kv_populate_samu_table failed\n");
1142                 return ret;
1143         }
1144         ret = kv_populate_acp_table(rdev);
1145         if (ret) {
1146                 DRM_ERROR("kv_populate_acp_table failed\n");
1147                 return ret;
1148         }
1149         kv_program_vc(rdev);
1150 #if 0
1151         kv_initialize_hardware_cac_manager(rdev);
1152 #endif
1153         kv_start_am(rdev);
1154         if (pi->enable_auto_thermal_throttling) {
1155                 ret = kv_enable_auto_thermal_throttling(rdev);
1156                 if (ret) {
1157                         DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
1158                         return ret;
1159                 }
1160         }
1161         ret = kv_enable_dpm_voltage_scaling(rdev);
1162         if (ret) {
1163                 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
1164                 return ret;
1165         }
1166         ret = kv_set_dpm_interval(rdev);
1167         if (ret) {
1168                 DRM_ERROR("kv_set_dpm_interval failed\n");
1169                 return ret;
1170         }
1171         ret = kv_set_dpm_boot_state(rdev);
1172         if (ret) {
1173                 DRM_ERROR("kv_set_dpm_boot_state failed\n");
1174                 return ret;
1175         }
1176         ret = kv_enable_ulv(rdev, true);
1177         if (ret) {
1178                 DRM_ERROR("kv_enable_ulv failed\n");
1179                 return ret;
1180         }
1181         kv_start_dpm(rdev);
1182         ret = kv_enable_didt(rdev, true);
1183         if (ret) {
1184                 DRM_ERROR("kv_enable_didt failed\n");
1185                 return ret;
1186         }
1187         ret = kv_enable_smc_cac(rdev, true);
1188         if (ret) {
1189                 DRM_ERROR("kv_enable_smc_cac failed\n");
1190                 return ret;
1191         }
1192
1193         if (rdev->irq.installed &&
1194             r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1195                 ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1196                 if (ret) {
1197                         DRM_ERROR("kv_set_thermal_temperature_range failed\n");
1198                         return ret;
1199                 }
1200                 rdev->irq.dpm_thermal = true;
1201                 radeon_irq_set(rdev);
1202         }
1203
1204         /* powerdown unused blocks for now */
1205         kv_dpm_powergate_acp(rdev, true);
1206         kv_dpm_powergate_samu(rdev, true);
1207         kv_dpm_powergate_vce(rdev, true);
1208         kv_dpm_powergate_uvd(rdev, true);
1209
1210         cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1211                              RADEON_CG_BLOCK_SDMA |
1212                              RADEON_CG_BLOCK_BIF |
1213                              RADEON_CG_BLOCK_HDP), true);
1214
1215         kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1216
1217         return ret;
1218 }
1219
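/* Tear DPM down in roughly the reverse order of kv_dpm_enable(). */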
1220 void kv_dpm_disable(struct radeon_device *rdev)
1221 {
1222         cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1223                              RADEON_CG_BLOCK_SDMA |
1224                              RADEON_CG_BLOCK_BIF |
1225                              RADEON_CG_BLOCK_HDP), false);
1226
1227         /* powerup blocks */
1228         kv_dpm_powergate_acp(rdev, false);
1229         kv_dpm_powergate_samu(rdev, false);
1230         kv_dpm_powergate_vce(rdev, false);
1231         kv_dpm_powergate_uvd(rdev, false);
1232
1233         kv_enable_smc_cac(rdev, false);
1234         kv_enable_didt(rdev, false);
1235         kv_clear_vc(rdev);
1236         kv_stop_dpm(rdev);
1237         kv_enable_ulv(rdev, false);
1238         kv_reset_am(rdev);
1239
1240         kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1241 }
1242
1243 #if 0
1244 static int kv_write_smc_soft_register(struct radeon_device *rdev,
1245                                       u16 reg_offset, u32 value)
1246 {
1247         struct kv_power_info *pi = kv_get_pi(rdev);
1248
1249         return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
1250                                     (u8 *)&value, sizeof(u16), pi->sram_end);
1251 }
1252
1253 static int kv_read_smc_soft_register(struct radeon_device *rdev,
1254                                      u16 reg_offset, u32 *value)
1255 {
1256         struct kv_power_info *pi = kv_get_pi(rdev);
1257
1258         return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
1259                                       value, pi->sram_end);
1260 }
1261 #endif
1262
1263 static void kv_init_sclk_t(struct radeon_device *rdev)
1264 {
1265         struct kv_power_info *pi = kv_get_pi(rdev);
1266
1267         pi->low_sclk_interrupt_t = 0;
1268 }
1269
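/* Program the SMC FPS thresholds (high 45, low 30) when FPS limits are supported. */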
1270 static int kv_init_fps_limits(struct radeon_device *rdev)
1271 {
1272         struct kv_power_info *pi = kv_get_pi(rdev);
1273         int ret = 0;
1274
1275         if (pi->caps_fps) {
1276                 u16 tmp;
1277
1278                 tmp = 45;
1279                 pi->fps_high_t = cpu_to_be16(tmp);
1280                 ret = kv_copy_bytes_to_smc(rdev,
1281                                            pi->dpm_table_start +
1282                                            offsetof(SMU7_Fusion_DpmTable, FpsHighT),
1283                                            (u8 *)&pi->fps_high_t,
1284                                            sizeof(u16), pi->sram_end);
1285
1286                 tmp = 30;
1287                 pi->fps_low_t = cpu_to_be16(tmp);
1288
1289                 ret = kv_copy_bytes_to_smc(rdev,
1290                                            pi->dpm_table_start +
1291                                            offsetof(SMU7_Fusion_DpmTable, FpsLowT),
1292                                            (u8 *)&pi->fps_low_t,
1293                                            sizeof(u16), pi->sram_end);
1294
1295         }
1296         return ret;
1297 }
1298
1299 static void kv_init_powergate_state(struct radeon_device *rdev)
1300 {
1301         struct kv_power_info *pi = kv_get_pi(rdev);
1302
1303         pi->uvd_power_gated = false;
1304         pi->vce_power_gated = false;
1305         pi->samu_power_gated = false;
1306         pi->acp_power_gated = false;
1307
1308 }
1309
1310 static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
1311 {
1312         return kv_notify_message_to_smu(rdev, enable ?
1313                                         PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
1314 }
1315
1316 #if 0
1317 static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
1318 {
1319         return kv_notify_message_to_smu(rdev, enable ?
1320                                         PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
1321 }
1322 #endif
1323
1324 static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
1325 {
1326         return kv_notify_message_to_smu(rdev, enable ?
1327                                         PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
1328 }
1329
1330 static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
1331 {
1332         return kv_notify_message_to_smu(rdev, enable ?
1333                                         PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
1334 }
1335
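/*
 * On un-gate, choose a UVD boot level, write it to the SMC (and force the
 * enabled-level mask when UVD DPM is unavailable or a stable p-state is
 * requested), then enable or disable UVD DPM to match the gate flag.
 */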
1336 static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
1337 {
1338         struct kv_power_info *pi = kv_get_pi(rdev);
1339         struct radeon_uvd_clock_voltage_dependency_table *table =
1340                 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1341         int ret;
1342
1343         if (!gate) {
1344                 if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
1345                         pi->uvd_boot_level = table->count - 1;
1346                 else
1347                         pi->uvd_boot_level = 0;
1348
1349                 ret = kv_copy_bytes_to_smc(rdev,
1350                                            pi->dpm_table_start +
1351                                            offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
1352                                            (uint8_t *)&pi->uvd_boot_level,
1353                                            sizeof(u8), pi->sram_end);
1354                 if (ret)
1355                         return ret;
1356
1357                 if (!pi->caps_uvd_dpm ||
1358                     pi->caps_stable_p_state)
1359                         kv_send_msg_to_smc_with_parameter(rdev,
1360                                                           PPSMC_MSG_UVDDPM_SetEnabledMask,
1361                                                           (1 << pi->uvd_boot_level));
1362         }
1363
1364         return kv_enable_uvd_dpm(rdev, !gate);
1365 }
1366
1367 #if 0
1368 static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
1369 {
1370         u8 i;
1371         struct radeon_vce_clock_voltage_dependency_table *table =
1372                 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1373
1374         for (i = 0; i < table->count; i++) {
1375                 if (table->entries[i].evclk >= 0) /* XXX */
1376                         break;
1377         }
1378
1379         return i;
1380 }
1381
1382 static int kv_update_vce_dpm(struct radeon_device *rdev,
1383                              struct radeon_ps *radeon_new_state,
1384                              struct radeon_ps *radeon_current_state)
1385 {
1386         struct kv_power_info *pi = kv_get_pi(rdev);
1387         struct radeon_vce_clock_voltage_dependency_table *table =
1388                 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1389         int ret;
1390
1391         if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
1392                 if (pi->caps_stable_p_state)
1393                         pi->vce_boot_level = table->count - 1;
1394                 else
1395                         pi->vce_boot_level = kv_get_vce_boot_level(rdev);
1396
1397                 ret = kv_copy_bytes_to_smc(rdev,
1398                                            pi->dpm_table_start +
1399                                            offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
1400                                            (u8 *)&pi->vce_boot_level,
1401                                            sizeof(u8),
1402                                            pi->sram_end);
1403                 if (ret)
1404                         return ret;
1405
1406                 if (pi->caps_stable_p_state)
1407                         kv_send_msg_to_smc_with_parameter(rdev,
1408                                                           PPSMC_MSG_VCEDPM_SetEnabledMask,
1409                                                           (1 << pi->vce_boot_level));
1410
1411                 kv_enable_vce_dpm(rdev, true);
1412         } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
1413                 kv_enable_vce_dpm(rdev, false);
1414         }
1415
1416         return 0;
1417 }
1418 #endif
1419
1420 static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
1421 {
1422         struct kv_power_info *pi = kv_get_pi(rdev);
1423         struct radeon_clock_voltage_dependency_table *table =
1424                 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1425         int ret;
1426
1427         if (!gate) {
1428                 if (pi->caps_stable_p_state)
1429                         pi->samu_boot_level = table->count - 1;
1430                 else
1431                         pi->samu_boot_level = 0;
1432
1433                 ret = kv_copy_bytes_to_smc(rdev,
1434                                            pi->dpm_table_start +
1435                                            offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
1436                                            (u8 *)&pi->samu_boot_level,
1437                                            sizeof(u8),
1438                                            pi->sram_end);
1439                 if (ret)
1440                         return ret;
1441
1442                 if (pi->caps_stable_p_state)
1443                         kv_send_msg_to_smc_with_parameter(rdev,
1444                                                           PPSMC_MSG_SAMUDPM_SetEnabledMask,
1445                                                           (1 << pi->samu_boot_level));
1446         }
1447
1448         return kv_enable_samu_dpm(rdev, !gate);
1449 }
1450
1451 static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
1452 {
1453         struct kv_power_info *pi = kv_get_pi(rdev);
1454         struct radeon_clock_voltage_dependency_table *table =
1455                 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1456         int ret;
1457
1458         if (!gate) {
1459                 if (pi->caps_stable_p_state)
1460                         pi->acp_boot_level = table->count - 1;
1461                 else
1462                         pi->acp_boot_level = 0;
1463
1464                 ret = kv_copy_bytes_to_smc(rdev,
1465                                            pi->dpm_table_start +
1466                                            offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
1467                                            (u8 *)&pi->acp_boot_level,
1468                                            sizeof(u8),
1469                                            pi->sram_end);
1470                 if (ret)
1471                         return ret;
1472
1473                 if (pi->caps_stable_p_state)
1474                         kv_send_msg_to_smc_with_parameter(rdev,
1475                                                           PPSMC_MSG_ACPDPM_SetEnabledMask,
1476                                                           (1 << pi->acp_boot_level));
1477         }
1478
1479         return kv_enable_acp_dpm(rdev, !gate);
1480 }
1481
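     /* Gate: stop UVD, drop its clockgating and DPM, then ask the SMU to
      * power the block off.  Ungating performs the same steps in reverse.
      */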
1482 void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
1483 {
1484         struct kv_power_info *pi = kv_get_pi(rdev);
1485
1486         if (pi->uvd_power_gated == gate)
1487                 return;
1488
1489         pi->uvd_power_gated = gate;
1490
1491         if (gate) {
1492                 if (pi->caps_uvd_pg) {
1493                         uvd_v1_0_stop(rdev);
1494                         cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
1495                 }
1496                 kv_update_uvd_dpm(rdev, gate);
1497                 if (pi->caps_uvd_pg)
1498                         kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
1499         } else {
1500                 if (pi->caps_uvd_pg) {
1501                         kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
1502                         uvd_v4_2_resume(rdev);
1503                         uvd_v1_0_start(rdev);
1504                         cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
1505                 }
1506                 kv_update_uvd_dpm(rdev, gate);
1507         }
1508 }
1509
1510 static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
1511 {
1512         struct kv_power_info *pi = kv_get_pi(rdev);
1513
1514         if (pi->vce_power_gated == gate)
1515                 return;
1516
1517         pi->vce_power_gated = gate;
1518
1519         if (gate) {
1520                 if (pi->caps_vce_pg)
1521                         kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
1522         } else {
1523                 if (pi->caps_vce_pg)
1524                         kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
1525         }
1526 }
1527
1528 static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
1529 {
1530         struct kv_power_info *pi = kv_get_pi(rdev);
1531
1532         if (pi->samu_power_gated == gate)
1533                 return;
1534
1535         pi->samu_power_gated = gate;
1536
1537         if (gate) {
1538                 kv_update_samu_dpm(rdev, true);
1539                 if (pi->caps_samu_pg)
1540                         kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
1541         } else {
1542                 if (pi->caps_samu_pg)
1543                         kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
1544                 kv_update_samu_dpm(rdev, false);
1545         }
1546 }
1547
1548 static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
1549 {
1550         struct kv_power_info *pi = kv_get_pi(rdev);
1551
1552         if (pi->acp_power_gated == gate)
1553                 return;
1554
1555         if (rdev->family == CHIP_KABINI)
1556                 return;
1557
1558         pi->acp_power_gated = gate;
1559
1560         if (gate) {
1561                 kv_update_acp_dpm(rdev, true);
1562                 if (pi->caps_acp_pg)
1563                         kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
1564         } else {
1565                 if (pi->caps_acp_pg)
1566                         kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
1567                 kv_update_acp_dpm(rdev, false);
1568         }
1569 }
1570
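     /* Clamp pi->lowest_valid/highest_valid to the graphics DPM levels that
      * bracket the sclk range of the new power state, using either the vddc
      * dependency table or the sumo sclk/voltage mapping table.
      */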
1571 static void kv_set_valid_clock_range(struct radeon_device *rdev,
1572                                      struct radeon_ps *new_rps)
1573 {
1574         struct kv_ps *new_ps = kv_get_ps(new_rps);
1575         struct kv_power_info *pi = kv_get_pi(rdev);
1576         u32 i;
1577         struct radeon_clock_voltage_dependency_table *table =
1578                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1579
1580         if (table && table->count) {
1581                 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1582                         if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
1583                             (i == (pi->graphics_dpm_level_count - 1))) {
1584                                 pi->lowest_valid = i;
1585                                 break;
1586                         }
1587                 }
1588
1589                 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
1590                         if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
1591                                 break;
1592                 }
1593                 pi->highest_valid = i;
1594
1595                 if (pi->lowest_valid > pi->highest_valid) {
1596                         if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
1597                             (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
1598                                 pi->highest_valid = pi->lowest_valid;
1599                         else
1600                                 pi->lowest_valid = pi->highest_valid;
1601                 }
1602         } else {
1603                 struct sumo_sclk_voltage_mapping_table *table =
1604                         &pi->sys_info.sclk_voltage_mapping_table;
1605
1606                 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
1607                         if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
1608                             i == (int)(pi->graphics_dpm_level_count - 1)) {
1609                                 pi->lowest_valid = i;
1610                                 break;
1611                         }
1612                 }
1613
1614                 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
1615                         if (table->entries[i].sclk_frequency <=
1616                             new_ps->levels[new_ps->num_levels - 1].sclk)
1617                                 break;
1618                 }
1619                 pi->highest_valid = i;
1620
1621                 if (pi->lowest_valid > pi->highest_valid) {
1622                         if ((new_ps->levels[0].sclk -
1623                              table->entries[pi->highest_valid].sclk_frequency) >
1624                             (table->entries[pi->lowest_valid].sclk_frequency -
1625                              new_ps->levels[new_ps->num_levels - 1].sclk))
1626                                 pi->highest_valid = pi->lowest_valid;
1627                         else
1628                                 pi->lowest_valid = pi->highest_valid;
1629                 }
1630         }
1631 }
1632
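     /* If DFS bypass is supported, write the boot level's ClkBypassCntl
      * value (or zero) into the SMU copy of the graphics level table.
      */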
1633 static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
1634                                          struct radeon_ps *new_rps)
1635 {
1636         struct kv_ps *new_ps = kv_get_ps(new_rps);
1637         struct kv_power_info *pi = kv_get_pi(rdev);
1638         int ret = 0;
1639         u8 clk_bypass_cntl;
1640
1641         if (pi->caps_enable_dfs_bypass) {
1642                 clk_bypass_cntl = new_ps->need_dfs_bypass ?
1643                         pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
1644                 ret = kv_copy_bytes_to_smc(rdev,
1645                                            (pi->dpm_table_start +
1646                                             offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
1647                                             (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
1648                                             offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
1649                                            &clk_bypass_cntl,
1650                                            sizeof(u8), pi->sram_end);
1651         }
1652
1653         return ret;
1654 }
1655
1656 static int kv_enable_nb_dpm(struct radeon_device *rdev)
1657 {
1658         struct kv_power_info *pi = kv_get_pi(rdev);
1659         int ret = 0;
1660
1661         if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
1662                 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
1663                 if (ret == 0)
1664                         pi->nb_dpm_enabled = true;
1665         }
1666
1667         return ret;
1668 }
1669
1670 int kv_dpm_force_performance_level(struct radeon_device *rdev,
1671                                    enum radeon_dpm_forced_level level)
1672 {
1673         int ret;
1674
1675         if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1676                 ret = kv_force_dpm_highest(rdev);
1677                 if (ret)
1678                         return ret;
1679         } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1680                 ret = kv_force_dpm_lowest(rdev);
1681                 if (ret)
1682                         return ret;
1683         } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
1684                 ret = kv_unforce_levels(rdev);
1685                 if (ret)
1686                         return ret;
1687         }
1688
1689         rdev->pm.dpm.forced_level = level;
1690
1691         return 0;
1692 }
1693
1694 int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
1695 {
1696         struct kv_power_info *pi = kv_get_pi(rdev);
1697         struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
1698         struct radeon_ps *new_ps = &requested_ps;
1699
1700         kv_update_requested_ps(rdev, new_ps);
1701
1702         kv_apply_state_adjust_rules(rdev,
1703                                     &pi->requested_rps,
1704                                     &pi->current_rps);
1705
1706         return 0;
1707 }
1708
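     /* Main state-switch path: clockgating is dropped around the
      * reprogramming; Kabini uses a force-lowest/unforce sequence while
      * other parts freeze sclk DPM while the tables are uploaded.
      */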
1709 int kv_dpm_set_power_state(struct radeon_device *rdev)
1710 {
1711         struct kv_power_info *pi = kv_get_pi(rdev);
1712         struct radeon_ps *new_ps = &pi->requested_rps;
1713         /*struct radeon_ps *old_ps = &pi->current_rps;*/
1714         int ret;
1715
1716         cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1717                              RADEON_CG_BLOCK_SDMA |
1718                              RADEON_CG_BLOCK_BIF |
1719                              RADEON_CG_BLOCK_HDP), false);
1720
1721         if (rdev->family == CHIP_KABINI) {
1722                 if (pi->enable_dpm) {
1723                         kv_set_valid_clock_range(rdev, new_ps);
1724                         kv_update_dfs_bypass_settings(rdev, new_ps);
1725                         ret = kv_calculate_ds_divider(rdev);
1726                         if (ret) {
1727                                 DRM_ERROR("kv_calculate_ds_divider failed\n");
1728                                 return ret;
1729                         }
1730                         kv_calculate_nbps_level_settings(rdev);
1731                         kv_calculate_dpm_settings(rdev);
1732                         kv_force_lowest_valid(rdev);
1733                         kv_enable_new_levels(rdev);
1734                         kv_upload_dpm_settings(rdev);
1735                         kv_program_nbps_index_settings(rdev, new_ps);
1736                         kv_unforce_levels(rdev);
1737                         kv_set_enabled_levels(rdev);
1738                         kv_force_lowest_valid(rdev);
1739                         kv_unforce_levels(rdev);
1740 #if 0
1741                         ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
1742                         if (ret) {
1743                                 DRM_ERROR("kv_update_vce_dpm failed\n");
1744                                 return ret;
1745                         }
1746 #endif
1747                         kv_update_sclk_t(rdev);
1748                 }
1749         } else {
1750                 if (pi->enable_dpm) {
1751                         kv_set_valid_clock_range(rdev, new_ps);
1752                         kv_update_dfs_bypass_settings(rdev, new_ps);
1753                         ret = kv_calculate_ds_divider(rdev);
1754                         if (ret) {
1755                                 DRM_ERROR("kv_calculate_ds_divider failed\n");
1756                                 return ret;
1757                         }
1758                         kv_calculate_nbps_level_settings(rdev);
1759                         kv_calculate_dpm_settings(rdev);
1760                         kv_freeze_sclk_dpm(rdev, true);
1761                         kv_upload_dpm_settings(rdev);
1762                         kv_program_nbps_index_settings(rdev, new_ps);
1763                         kv_freeze_sclk_dpm(rdev, false);
1764                         kv_set_enabled_levels(rdev);
1765 #if 0
1766                         ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
1767                         if (ret) {
1768                                 DRM_ERROR("kv_update_vce_dpm failed\n");
1769                                 return ret;
1770                         }
1771 #endif
1772                         kv_update_sclk_t(rdev);
1773                         kv_enable_nb_dpm(rdev);
1774                 }
1775         }
1776
1777         cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
1778                              RADEON_CG_BLOCK_SDMA |
1779                              RADEON_CG_BLOCK_BIF |
1780                              RADEON_CG_BLOCK_HDP), true);
1781
1782         rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1783         return 0;
1784 }
1785
1786 void kv_dpm_post_set_power_state(struct radeon_device *rdev)
1787 {
1788         struct kv_power_info *pi = kv_get_pi(rdev);
1789         struct radeon_ps *new_ps = &pi->requested_rps;
1790
1791         kv_update_current_ps(rdev, new_ps);
1792 }
1793
1794 void kv_dpm_setup_asic(struct radeon_device *rdev)
1795 {
1796         sumo_take_smu_control(rdev, true);
1797         kv_init_powergate_state(rdev);
1798         kv_init_sclk_t(rdev);
1799 }
1800
1801 void kv_dpm_reset_asic(struct radeon_device *rdev)
1802 {
1803         kv_force_lowest_valid(rdev);
1804         kv_init_graphics_levels(rdev);
1805         kv_program_bootup_state(rdev);
1806         kv_upload_dpm_settings(rdev);
1807         kv_force_lowest_valid(rdev);
1808         kv_unforce_levels(rdev);
1809 }
1810
1811 //XXX use sumo_dpm_display_configuration_changed
1812
1813 static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
1814                                                 struct radeon_clock_and_voltage_limits *table)
1815 {
1816         struct kv_power_info *pi = kv_get_pi(rdev);
1817
1818         if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
1819                 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
1820                 table->sclk =
1821                         pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
1822                 table->vddc =
1823                         kv_convert_2bit_index_to_voltage(rdev,
1824                                                          pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
1825         }
1826
1827         table->mclk = pi->sys_info.nbp_memory_clock[0];
1828 }
1829
1830 static void kv_patch_voltage_values(struct radeon_device *rdev)
1831 {
1832         int i;
1833         struct radeon_uvd_clock_voltage_dependency_table *table =
1834                 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1835
1836         if (table->count) {
1837                 for (i = 0; i < table->count; i++)
1838                         table->entries[i].v =
1839                                 kv_convert_8bit_index_to_voltage(rdev,
1840                                                                  table->entries[i].v);
1841         }
1842
1843 }
1844
1845 static void kv_construct_boot_state(struct radeon_device *rdev)
1846 {
1847         struct kv_power_info *pi = kv_get_pi(rdev);
1848
1849         pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
1850         pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
1851         pi->boot_pl.ds_divider_index = 0;
1852         pi->boot_pl.ss_divider_index = 0;
1853         pi->boot_pl.allow_gnb_slow = 1;
1854         pi->boot_pl.force_nbp_state = 0;
1855         pi->boot_pl.display_wm = 0;
1856         pi->boot_pl.vce_wm = 0;
1857 }
1858
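     /* Force the SMU to the highest enabled graphics DPM level found in
      * the enable mask; kv_force_dpm_lowest() below does the same from the
      * bottom of the mask.
      */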
1859 static int kv_force_dpm_highest(struct radeon_device *rdev)
1860 {
1861         int ret;
1862         u32 enable_mask, i;
1863
1864         ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
1865         if (ret)
1866                 return ret;
1867
1868         for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
1869                 if (enable_mask & (1 << i))
1870                         break;
1871         }
1872
1873         return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1874 }
1875
1876 static int kv_force_dpm_lowest(struct radeon_device *rdev)
1877 {
1878         int ret;
1879         u32 enable_mask, i;
1880
1881         ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
1882         if (ret)
1883                 return ret;
1884
1885         for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
1886                 if (enable_mask & (1 << i))
1887                         break;
1888         }
1889
1890         return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1891 }
1892
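     /* Pick the largest deep-sleep divider ID that still keeps the divided
      * sclk at or above the minimum engine clock (or min_sclk_in_sr,
      * whichever is higher).
      */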
1893 static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1894                                              u32 sclk, u32 min_sclk_in_sr)
1895 {
1896         struct kv_power_info *pi = kv_get_pi(rdev);
1897         u32 i;
1898         u32 temp;
1899         u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
1900                 min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
1901
1902         if (sclk < min)
1903                 return 0;
1904
1905         if (!pi->caps_sclk_ds)
1906                 return 0;
1907
1908         for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
1909                 temp = sclk / sumo_get_sleep_divider_from_id(i);
1910                 if (temp >= min)
1911                         break;
1912         }
1913
1914         return (u8)i;
1915 }
1916
1917 static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
1918 {
1919         struct kv_power_info *pi = kv_get_pi(rdev);
1920         struct radeon_clock_voltage_dependency_table *table =
1921                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1922         int i;
1923
1924         if (table && table->count) {
1925                 for (i = table->count - 1; i >= 0; i--) {
1926                         if (pi->high_voltage_t &&
1927                             (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
1928                              pi->high_voltage_t)) {
1929                                 *limit = i;
1930                                 return 0;
1931                         }
1932                 }
1933         } else {
1934                 struct sumo_sclk_voltage_mapping_table *table =
1935                         &pi->sys_info.sclk_voltage_mapping_table;
1936
1937                 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
1938                         if (pi->high_voltage_t &&
1939                             (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
1940                              pi->high_voltage_t)) {
1941                                 *limit = i;
1942                                 return 0;
1943                         }
1944                 }
1945         }
1946
1947         *limit = 0;
1948         return 0;
1949 }
1950
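     /* Massage the requested state: enforce a minimum sclk, cap levels
      * that exceed the high-voltage limit, apply the stable-p-state clock
      * (75% of the AC maximum) and choose NB p-state hints per family.
      */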
1951 static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
1952                                         struct radeon_ps *new_rps,
1953                                         struct radeon_ps *old_rps)
1954 {
1955         struct kv_ps *ps = kv_get_ps(new_rps);
1956         struct kv_power_info *pi = kv_get_pi(rdev);
1957         u32 min_sclk = 10000; /* ??? */
1958         u32 sclk, mclk = 0;
1959         int i, limit;
1960         bool force_high;
1961         struct radeon_clock_voltage_dependency_table *table =
1962                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1963         u32 stable_p_state_sclk = 0;
1964         struct radeon_clock_and_voltage_limits *max_limits =
1965                 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1966
1967         mclk = max_limits->mclk;
1968         sclk = min_sclk;
1969
1970         if (pi->caps_stable_p_state) {
1971                 stable_p_state_sclk = (max_limits->sclk * 75) / 100;
1972
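                     /* Walk the table from the top down; i must stay signed
                      * so the i >= 0 check can terminate the loop.
                      */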
1973                 for (i = table->count - 1; i >= 0; i--) {
1974                         if (stable_p_state_sclk >= table->entries[i].clk) {
1975                                 stable_p_state_sclk = table->entries[i].clk;
1976                                 break;
1977                         }
1978                 }
1979
1980                 if (i > 0)
1981                         stable_p_state_sclk = table->entries[0].clk;
1982
1983                 sclk = stable_p_state_sclk;
1984         }
1985
1986         ps->need_dfs_bypass = true;
1987
1988         for (i = 0; i < ps->num_levels; i++) {
1989                 if (ps->levels[i].sclk < sclk)
1990                         ps->levels[i].sclk = sclk;
1991         }
1992
1993         if (table && table->count) {
1994                 for (i = 0; i < ps->num_levels; i++) {
1995                         if (pi->high_voltage_t &&
1996                             (pi->high_voltage_t <
1997                              kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
1998                                 kv_get_high_voltage_limit(rdev, &limit);
1999                                 ps->levels[i].sclk = table->entries[limit].clk;
2000                         }
2001                 }
2002         } else {
2003                 struct sumo_sclk_voltage_mapping_table *table =
2004                         &pi->sys_info.sclk_voltage_mapping_table;
2005
2006                 for (i = 0; i < ps->num_levels; i++) {
2007                         if (pi->high_voltage_t &&
2008                             (pi->high_voltage_t <
2009                              kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
2010                                 kv_get_high_voltage_limit(rdev, &limit);
2011                                 ps->levels[i].sclk = table->entries[limit].sclk_frequency;
2012                         }
2013                 }
2014         }
2015
2016         if (pi->caps_stable_p_state) {
2017                 for (i = 0; i < ps->num_levels; i++) {
2018                         ps->levels[i].sclk = stable_p_state_sclk;
2019                 }
2020         }
2021
2022         pi->video_start = new_rps->dclk || new_rps->vclk;
2023
2024         if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
2025             ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
2026                 pi->battery_state = true;
2027         else
2028                 pi->battery_state = false;
2029
2030         if (rdev->family == CHIP_KABINI) {
2031                 ps->dpm0_pg_nb_ps_lo = 0x1;
2032                 ps->dpm0_pg_nb_ps_hi = 0x0;
2033                 ps->dpmx_nb_ps_lo = 0x1;
2034                 ps->dpmx_nb_ps_hi = 0x0;
2035         } else {
2036                 ps->dpm0_pg_nb_ps_lo = 0x1;
2037                 ps->dpm0_pg_nb_ps_hi = 0x0;
2038                 ps->dpmx_nb_ps_lo = 0x2;
2039                 ps->dpmx_nb_ps_hi = 0x1;
2040
2041                 if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2042                         force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2043                                 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
2044                                 pi->disable_nb_ps3_in_battery;
2045                         ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
2046                         ps->dpm0_pg_nb_ps_hi = 0x2;
2047                         ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
2048                         ps->dpmx_nb_ps_hi = 0x2;
2049                 }
2050         }
2051 }
2052
2053 static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
2054                                                     u32 index, bool enable)
2055 {
2056         struct kv_power_info *pi = kv_get_pi(rdev);
2057
2058         pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
2059 }
2060
2061 static int kv_calculate_ds_divider(struct radeon_device *rdev)
2062 {
2063         struct kv_power_info *pi = kv_get_pi(rdev);
2064         u32 sclk_in_sr = 10000; /* ??? */
2065         u32 i;
2066
2067         if (pi->lowest_valid > pi->highest_valid)
2068                 return -EINVAL;
2069
2070         for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2071                 pi->graphics_level[i].DeepSleepDivId =
2072                         kv_get_sleep_divider_id_from_clock(rdev,
2073                                                            be32_to_cpu(pi->graphics_level[i].SclkFrequency),
2074                                                            sclk_in_sr);
2075         }
2076         return 0;
2077 }
2078
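     /* Program the per-level GnbSlow/ForceNbPs1/UpH hints.  Kabini decides
      * between forcing high NB p-states and slowing the GNB based on mclk,
      * display count and battery state; other parts only tweak the lowest
      * valid level when NB DPM is enabled on battery.
      */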
2079 static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
2080 {
2081         struct kv_power_info *pi = kv_get_pi(rdev);
2082         u32 i;
2083         bool force_high;
2084         struct radeon_clock_and_voltage_limits *max_limits =
2085                 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2086         u32 mclk = max_limits->mclk;
2087
2088         if (pi->lowest_valid > pi->highest_valid)
2089                 return -EINVAL;
2090
2091         if (rdev->family == CHIP_KABINI) {
2092                 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2093                         pi->graphics_level[i].GnbSlow = 1;
2094                         pi->graphics_level[i].ForceNbPs1 = 0;
2095                         pi->graphics_level[i].UpH = 0;
2096                 }
2097
2098                 if (!pi->sys_info.nb_dpm_enable)
2099                         return 0;
2100
2101                 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2102                               (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
2103
2104                 if (force_high) {
2105                         for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2106                                 pi->graphics_level[i].GnbSlow = 0;
2107                 } else {
2108                         if (pi->battery_state)
2109                                 pi->graphics_level[0].ForceNbPs1 = 1;
2110
2111                         pi->graphics_level[1].GnbSlow = 0;
2112                         pi->graphics_level[2].GnbSlow = 0;
2113                         pi->graphics_level[3].GnbSlow = 0;
2114                         pi->graphics_level[4].GnbSlow = 0;
2115                 }
2116         } else {
2117                 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2118                         pi->graphics_level[i].GnbSlow = 1;
2119                         pi->graphics_level[i].ForceNbPs1 = 0;
2120                         pi->graphics_level[i].UpH = 0;
2121                 }
2122
2123                 if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2124                         pi->graphics_level[pi->lowest_valid].UpH = 0x28;
2125                         pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
2126                         if (pi->lowest_valid != pi->highest_valid)
2127                                 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
2128                 }
2129         }
2130         return 0;
2131 }
2132
2133 static int kv_calculate_dpm_settings(struct radeon_device *rdev)
2134 {
2135         struct kv_power_info *pi = kv_get_pi(rdev);
2136         u32 i;
2137
2138         if (pi->lowest_valid > pi->highest_valid)
2139                 return -EINVAL;
2140
2141         for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2142                 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
2143
2144         return 0;
2145 }
2146
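     /* Build the graphics DPM levels from the vddc-vs-sclk dependency
      * table when it is populated, otherwise from the sumo sclk/voltage
      * mapping table, then start with every level disabled.
      */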
2147 static void kv_init_graphics_levels(struct radeon_device *rdev)
2148 {
2149         struct kv_power_info *pi = kv_get_pi(rdev);
2150         u32 i;
2151         struct radeon_clock_voltage_dependency_table *table =
2152                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2153
2154         if (table && table->count) {
2155                 u32 vid_2bit;
2156
2157                 pi->graphics_dpm_level_count = 0;
2158                 for (i = 0; i < table->count; i++) {
2159                         if (pi->high_voltage_t &&
2160                             (pi->high_voltage_t <
2161                              kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
2162                                 break;
2163
2164                         kv_set_divider_value(rdev, i, table->entries[i].clk);
2165                         vid_2bit = sumo_convert_vid7_to_vid2(rdev,
2166                                                              &pi->sys_info.vid_mapping_table,
2167                                                              table->entries[i].v);
2168                         kv_set_vid(rdev, i, vid_2bit);
2169                         kv_set_at(rdev, i, pi->at[i]);
2170                         kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2171                         pi->graphics_dpm_level_count++;
2172                 }
2173         } else {
2174                 struct sumo_sclk_voltage_mapping_table *table =
2175                         &pi->sys_info.sclk_voltage_mapping_table;
2176
2177                 pi->graphics_dpm_level_count = 0;
2178                 for (i = 0; i < table->num_max_dpm_entries; i++) {
2179                         if (pi->high_voltage_t &&
2180                             pi->high_voltage_t <
2181                             kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
2182                                 break;
2183
2184                         kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
2185                         kv_set_vid(rdev, i, table->entries[i].vid_2bit);
2186                         kv_set_at(rdev, i, pi->at[i]);
2187                         kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2188                         pi->graphics_dpm_level_count++;
2189                 }
2190         }
2191
2192         for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2193                 kv_dpm_power_level_enable(rdev, i, false);
2194 }
2195
2196 static void kv_enable_new_levels(struct radeon_device *rdev)
2197 {
2198         struct kv_power_info *pi = kv_get_pi(rdev);
2199         u32 i;
2200
2201         for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2202                 if (i >= pi->lowest_valid && i <= pi->highest_valid)
2203                         kv_dpm_power_level_enable(rdev, i, true);
2204         }
2205 }
2206
2207 static int kv_set_enabled_levels(struct radeon_device *rdev)
2208 {
2209         struct kv_power_info *pi = kv_get_pi(rdev);
2210         u32 i, new_mask = 0;
2211
2212         for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2213                 new_mask |= (1 << i);
2214
2215         return kv_send_msg_to_smc_with_parameter(rdev,
2216                                                  PPSMC_MSG_SCLKDPM_SetEnabledMask,
2217                                                  new_mask);
2218 }
2219
2220 static void kv_program_nbps_index_settings(struct radeon_device *rdev,
2221                                            struct radeon_ps *new_rps)
2222 {
2223         struct kv_ps *new_ps = kv_get_ps(new_rps);
2224         struct kv_power_info *pi = kv_get_pi(rdev);
2225         u32 nbdpmconfig1;
2226
2227         if (rdev->family == CHIP_KABINI)
2228                 return;
2229
2230         if (pi->sys_info.nb_dpm_enable) {
2231                 nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
2232                 nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
2233                                   DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
2234                 nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
2235                                  Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
2236                                  DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
2237                                  DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
2238                 WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
2239         }
2240 }
2241
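     /* Program the thermal interrupt thresholds (DIG_THERM_INTH/INTL are
      * encoded with a +49 offset) and record the accepted range.
      */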
2242 static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
2243                                             int min_temp, int max_temp)
2244 {
2245         int low_temp = 0 * 1000;
2246         int high_temp = 255 * 1000;
2247         u32 tmp;
2248
2249         if (low_temp < min_temp)
2250                 low_temp = min_temp;
2251         if (high_temp > max_temp)
2252                 high_temp = max_temp;
2253         if (high_temp < low_temp) {
2254                 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
2255                 return -EINVAL;
2256         }
2257
2258         tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
2259         tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
2260         tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
2261                 DIG_THERM_INTL(49 + (low_temp / 1000)));
2262         WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);
2263
2264         rdev->pm.dpm.thermal.min_temp = low_temp;
2265         rdev->pm.dpm.thermal.max_temp = high_temp;
2266
2267         return 0;
2268 }
2269
2270 union igp_info {
2271         struct _ATOM_INTEGRATED_SYSTEM_INFO info;
2272         struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
2273         struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
2274         struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
2275         struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
2276         struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
2277 };
2278
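     /* Pull bootup clocks, HTC limits, NB p-state clocks and the
      * DFS-bypass capability out of the crev-8 IntegratedSystemInfo table,
      * then build the sclk/voltage and VID mapping tables.
      */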
2279 static int kv_parse_sys_info_table(struct radeon_device *rdev)
2280 {
2281         struct kv_power_info *pi = kv_get_pi(rdev);
2282         struct radeon_mode_info *mode_info = &rdev->mode_info;
2283         int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
2284         union igp_info *igp_info;
2285         u8 frev, crev;
2286         u16 data_offset;
2287         int i;
2288
2289         if (atom_parse_data_header(mode_info->atom_context, index, NULL,
2290                                    &frev, &crev, &data_offset)) {
2291                 igp_info = (union igp_info *)(mode_info->atom_context->bios +
2292                                               data_offset);
2293
2294                 if (crev != 8) {
2295                         DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
2296                         return -EINVAL;
2297                 }
2298                 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
2299                 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
2300                 pi->sys_info.bootup_nb_voltage_index =
2301                         le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
2302                 if (igp_info->info_8.ucHtcTmpLmt == 0)
2303                         pi->sys_info.htc_tmp_lmt = 203;
2304                 else
2305                         pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
2306                 if (igp_info->info_8.ucHtcHystLmt == 0)
2307                         pi->sys_info.htc_hyst_lmt = 5;
2308                 else
2309                         pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
2310                 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
2311                         DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
2312                 }
2313
2314                 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
2315                         pi->sys_info.nb_dpm_enable = true;
2316                 else
2317                         pi->sys_info.nb_dpm_enable = false;
2318
2319                 for (i = 0; i < KV_NUM_NBPSTATES; i++) {
2320                         pi->sys_info.nbp_memory_clock[i] =
2321                                 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
2322                         pi->sys_info.nbp_n_clock[i] =
2323                                 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
2324                 }
2325                 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
2326                     SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
2327                         pi->caps_enable_dfs_bypass = true;
2328
2329                 sumo_construct_sclk_voltage_mapping_table(rdev,
2330                                                           &pi->sys_info.sclk_voltage_mapping_table,
2331                                                           igp_info->info_8.sAvail_SCLK);
2332
2333                 sumo_construct_vid_mapping_table(rdev,
2334                                                  &pi->sys_info.vid_mapping_table,
2335                                                  igp_info->info_8.sAvail_SCLK);
2336
2337                 kv_construct_max_power_limits_table(rdev,
2338                                                     &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
2339         }
2340         return 0;
2341 }
2342
2343 union power_info {
2344         struct _ATOM_POWERPLAY_INFO info;
2345         struct _ATOM_POWERPLAY_INFO_V2 info_2;
2346         struct _ATOM_POWERPLAY_INFO_V3 info_3;
2347         struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
2348         struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
2349         struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
2350 };
2351
2352 union pplib_clock_info {
2353         struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
2354         struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
2355         struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
2356         struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
2357 };
2358
2359 union pplib_power_state {
2360         struct _ATOM_PPLIB_STATE v1;
2361         struct _ATOM_PPLIB_STATE_V2 v2;
2362 };
2363
2364 static void kv_patch_boot_state(struct radeon_device *rdev,
2365                                 struct kv_ps *ps)
2366 {
2367         struct kv_power_info *pi = kv_get_pi(rdev);
2368
2369         ps->num_levels = 1;
2370         ps->levels[0] = pi->boot_pl;
2371 }
2372
2373 static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
2374                                           struct radeon_ps *rps,
2375                                           struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2376                                           u8 table_rev)
2377 {
2378         struct kv_ps *ps = kv_get_ps(rps);
2379
2380         rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2381         rps->class = le16_to_cpu(non_clock_info->usClassification);
2382         rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2383
2384         if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2385                 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2386                 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2387         } else {
2388                 rps->vclk = 0;
2389                 rps->dclk = 0;
2390         }
2391
2392         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2393                 rdev->pm.dpm.boot_ps = rps;
2394                 kv_patch_boot_state(rdev, ps);
2395         }
2396         if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2397                 rdev->pm.dpm.uvd_ps = rps;
2398 }
2399
2400 static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
2401                                       struct radeon_ps *rps, int index,
2402                                       union pplib_clock_info *clock_info)
2403 {
2404         struct kv_power_info *pi = kv_get_pi(rdev);
2405         struct kv_ps *ps = kv_get_ps(rps);
2406         struct kv_pl *pl = &ps->levels[index];
2407         u32 sclk;
2408
2409         sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2410         sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2411         pl->sclk = sclk;
2412         pl->vddc_index = clock_info->sumo.vddcIndex;
2413
2414         ps->num_levels = index + 1;
2415
2416         if (pi->caps_sclk_ds) {
2417                 pl->ds_divider_index = 5;
2418                 pl->ss_divider_index = 5;
2419         }
2420 }
2421
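     /* Walk the PPLib state array, allocate a kv_ps for each entry and
      * fill in its clock levels and non-clock (classification) data.
      */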
2422 static int kv_parse_power_table(struct radeon_device *rdev)
2423 {
2424         struct radeon_mode_info *mode_info = &rdev->mode_info;
2425         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2426         union pplib_power_state *power_state;
2427         int i, j, k, non_clock_array_index, clock_array_index;
2428         union pplib_clock_info *clock_info;
2429         struct _StateArray *state_array;
2430         struct _ClockInfoArray *clock_info_array;
2431         struct _NonClockInfoArray *non_clock_info_array;
2432         union power_info *power_info;
2433         int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2434         u16 data_offset;
2435         u8 frev, crev;
2436         u8 *power_state_offset;
2437         struct kv_ps *ps;
2438
2439         if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2440                                    &frev, &crev, &data_offset))
2441                 return -EINVAL;
2442         power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2443
2444         state_array = (struct _StateArray *)
2445                 (mode_info->atom_context->bios + data_offset +
2446                  le16_to_cpu(power_info->pplib.usStateArrayOffset));
2447         clock_info_array = (struct _ClockInfoArray *)
2448                 (mode_info->atom_context->bios + data_offset +
2449                  le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2450         non_clock_info_array = (struct _NonClockInfoArray *)
2451                 (mode_info->atom_context->bios + data_offset +
2452                  le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2453
2454         rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
2455                                   state_array->ucNumEntries, GFP_KERNEL);
2456         if (!rdev->pm.dpm.ps)
2457                 return -ENOMEM;
2458         power_state_offset = (u8 *)state_array->states;
2459         rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
2460         rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
2461         rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
2462         for (i = 0; i < state_array->ucNumEntries; i++) {
2463                 u8 *idx;
2464                 power_state = (union pplib_power_state *)power_state_offset;
2465                 non_clock_array_index = power_state->v2.nonClockInfoIndex;
2466                 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2467                         &non_clock_info_array->nonClockInfo[non_clock_array_index];
2468                 if (!rdev->pm.power_state[i].clock_info)
2469                         return -EINVAL;
2470                 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
2471                 if (ps == NULL) {
2472                         kfree(rdev->pm.dpm.ps);
2473                         return -ENOMEM;
2474                 }
2475                 rdev->pm.dpm.ps[i].ps_priv = ps;
2476                 k = 0;
2477                 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
2478                 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2479                         clock_array_index = idx[j];
2480                         if (clock_array_index >= clock_info_array->ucNumEntries)
2481                                 continue;
2482                         if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
2483                                 break;
2484                         clock_info = (union pplib_clock_info *)
2485                                 ((u8 *)&clock_info_array->clockInfo[0] +
2486                                  (clock_array_index * clock_info_array->ucEntrySize));
2487                         kv_parse_pplib_clock_info(rdev,
2488                                                   &rdev->pm.dpm.ps[i], k,
2489                                                   clock_info);
2490                         k++;
2491                 }
2492                 kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2493                                               non_clock_info,
2494                                               non_clock_info_array->ucEntrySize);
2495                 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2496         }
2497         rdev->pm.dpm.num_ps = state_array->ucNumEntries;
2498         return 0;
2499 }
2500
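     /* Allocate kv_power_info, set the default capability flags, parse the
      * extended/system/power tables, patch the voltage values and build the
      * boot state.
      */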
2501 int kv_dpm_init(struct radeon_device *rdev)
2502 {
2503         struct kv_power_info *pi;
2504         int ret, i;
2505
2506         pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
2507         if (pi == NULL)
2508                 return -ENOMEM;
2509         rdev->pm.dpm.priv = pi;
2510
2511         ret = r600_parse_extended_power_table(rdev);
2512         if (ret)
2513                 return ret;
2514
2515         for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
2516                 pi->at[i] = TRINITY_AT_DFLT;
2517
2518         pi->sram_end = SMC_RAM_END;
2519
2520         if (rdev->family == CHIP_KABINI)
2521                 pi->high_voltage_t = 4001;
2522
2523         pi->enable_nb_dpm = true;
2524
2525         pi->caps_power_containment = true;
2526         pi->caps_cac = true;
2527         pi->enable_didt = false;
2528         if (pi->enable_didt) {
2529                 pi->caps_sq_ramping = true;
2530                 pi->caps_db_ramping = true;
2531                 pi->caps_td_ramping = true;
2532                 pi->caps_tcp_ramping = true;
2533         }
2534
2535         pi->caps_sclk_ds = true;
2536         pi->enable_auto_thermal_throttling = true;
2537         pi->disable_nb_ps3_in_battery = false;
2538         pi->bapm_enable = true;
2539         pi->voltage_drop_t = 0;
2540         pi->caps_sclk_throttle_low_notification = false;
2541         pi->caps_fps = false; /* true? */
2542         pi->caps_uvd_pg = true;
2543         pi->caps_uvd_dpm = true;
2544         pi->caps_vce_pg = false;
2545         pi->caps_samu_pg = false;
2546         pi->caps_acp_pg = false;
2547         pi->caps_stable_p_state = false;
2548
2549         ret = kv_parse_sys_info_table(rdev);
2550         if (ret)
2551                 return ret;
2552
2553         kv_patch_voltage_values(rdev);
2554         kv_construct_boot_state(rdev);
2555
2556         ret = kv_parse_power_table(rdev);
2557         if (ret)
2558                 return ret;
2559
2560         pi->enable_dpm = true;
2561
2562         return 0;
2563 }
2564
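     /* Report the currently selected sclk DPM index, its sclk and the
      * current voltage as read back from the SMC status registers.
      */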
2565 void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2566                                                     struct seq_file *m)
2567 {
2568         struct kv_power_info *pi = kv_get_pi(rdev);
2569         u32 current_index =
2570                 (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
2571                 CURR_SCLK_INDEX_SHIFT;
2572         u32 sclk, tmp;
2573         u16 vddc;
2574
2575         if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2576                 seq_printf(m, "invalid dpm profile %d\n", current_index);
2577         } else {
2578                 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2579                 tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
2580                         SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
2581                 vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
2582                 seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
2583                            current_index, sclk, vddc);
2584         }
2585 }
2586
2587 void kv_dpm_print_power_state(struct radeon_device *rdev,
2588                               struct radeon_ps *rps)
2589 {
2590         int i;
2591         struct kv_ps *ps = kv_get_ps(rps);
2592
2593         r600_dpm_print_class_info(rps->class, rps->class2);
2594         r600_dpm_print_cap_info(rps->caps);
2595         printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2596         for (i = 0; i < ps->num_levels; i++) {
2597                 struct kv_pl *pl = &ps->levels[i];
2598                 printk("\t\tpower level %d    sclk: %u vddc: %u\n",
2599                        i, pl->sclk,
2600                        kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
2601         }
2602         r600_dpm_print_ps_status(rdev, rps);
2603 }
2604
2605 void kv_dpm_fini(struct radeon_device *rdev)
2606 {
2607         int i;
2608
2609         for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2610                 kfree(rdev->pm.dpm.ps[i].ps_priv);
2611         }
2612         kfree(rdev->pm.dpm.ps);
2613         kfree(rdev->pm.dpm.priv);
2614         r600_free_extended_power_table(rdev);
2615 }
2616
2617 void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
2618 {
2619
2620 }
2621
2622 u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
2623 {
2624         struct kv_power_info *pi = kv_get_pi(rdev);
2625         struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
2626
2627         if (low)
2628                 return requested_state->levels[0].sclk;
2629         else
2630                 return requested_state->levels[requested_state->num_levels - 1].sclk;
2631 }
2632
2633 u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
2634 {
2635         struct kv_power_info *pi = kv_get_pi(rdev);
2636
2637         return pi->sys_info.bootup_uma_clk;
2638 }
2639