]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/amd/amdgpu/ci_dpm.c
Merge remote-tracking branch 'input-current/for-linus'
[karo-tx-linux.git] / drivers / gpu / drm / amd / amdgpu / ci_dpm.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25 #include "drmP.h"
26 #include "amdgpu.h"
27 #include "amdgpu_pm.h"
28 #include "amdgpu_ucode.h"
29 #include "cikd.h"
30 #include "amdgpu_dpm.h"
31 #include "ci_dpm.h"
32 #include "gfx_v7_0.h"
33 #include "atom.h"
34 #include <linux/seq_file.h>
35
36 #include "smu/smu_7_0_1_d.h"
37 #include "smu/smu_7_0_1_sh_mask.h"
38
39 #include "dce/dce_8_0_d.h"
40 #include "dce/dce_8_0_sh_mask.h"
41
42 #include "bif/bif_4_1_d.h"
43 #include "bif/bif_4_1_sh_mask.h"
44
45 #include "gca/gfx_7_2_d.h"
46 #include "gca/gfx_7_2_sh_mask.h"
47
48 #include "gmc/gmc_7_1_d.h"
49 #include "gmc/gmc_7_1_sh_mask.h"
50
51 MODULE_FIRMWARE("radeon/bonaire_smc.bin");
52 MODULE_FIRMWARE("radeon/hawaii_smc.bin");
53
54 #define MC_CG_ARB_FREQ_F0           0x0a
55 #define MC_CG_ARB_FREQ_F1           0x0b
56 #define MC_CG_ARB_FREQ_F2           0x0c
57 #define MC_CG_ARB_FREQ_F3           0x0d
58
59 #define SMC_RAM_END 0x40000
60
61 #define VOLTAGE_SCALE               4
62 #define VOLTAGE_VID_OFFSET_SCALE1    625
63 #define VOLTAGE_VID_OFFSET_SCALE2    100
64
65 static const struct ci_pt_defaults defaults_hawaii_xt =
66 {
67         1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
68         { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
69         { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
70 };
71
72 static const struct ci_pt_defaults defaults_hawaii_pro =
73 {
74         1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
75         { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
76         { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
77 };
78
79 static const struct ci_pt_defaults defaults_bonaire_xt =
80 {
81         1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
82         { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
83         { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
84 };
85
86 static const struct ci_pt_defaults defaults_bonaire_pro =
87 {
88         1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
89         { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
90         { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
91 };
92
93 static const struct ci_pt_defaults defaults_saturn_xt =
94 {
95         1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
96         { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
97         { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
98 };
99
100 static const struct ci_pt_defaults defaults_saturn_pro =
101 {
102         1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
103         { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
104         { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
105 };
106
107 static const struct ci_pt_config_reg didt_config_ci[] =
108 {
109         { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
110         { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
111         { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
112         { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
113         { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
114         { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
115         { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
116         { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
117         { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
118         { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119         { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
120         { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
121         { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
122         { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
123         { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
124         { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
125         { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
126         { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
127         { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
128         { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
129         { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
130         { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
131         { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
132         { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
133         { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
134         { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
135         { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136         { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137         { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
138         { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
139         { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
140         { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
141         { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
142         { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
143         { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
144         { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
145         { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
146         { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
147         { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
148         { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
149         { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
150         { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
151         { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
152         { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
153         { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
154         { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
155         { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
156         { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
157         { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
158         { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
159         { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
160         { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
161         { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
162         { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
163         { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
164         { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
165         { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
166         { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
167         { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
168         { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
169         { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
170         { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
171         { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
172         { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
173         { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
174         { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
175         { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
176         { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
177         { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
178         { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
179         { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
180         { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
181         { 0xFFFFFFFF }
182 };
183
184 static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
185 {
186         return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
187 }
188
189 #define MC_CG_ARB_FREQ_F0           0x0a
190 #define MC_CG_ARB_FREQ_F1           0x0b
191 #define MC_CG_ARB_FREQ_F2           0x0c
192 #define MC_CG_ARB_FREQ_F3           0x0d
193
194 static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
195                                        u32 arb_freq_src, u32 arb_freq_dest)
196 {
197         u32 mc_arb_dram_timing;
198         u32 mc_arb_dram_timing2;
199         u32 burst_time;
200         u32 mc_cg_config;
201
202         switch (arb_freq_src) {
203         case MC_CG_ARB_FREQ_F0:
204                 mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
205                 mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
206                 burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
207                          MC_ARB_BURST_TIME__STATE0__SHIFT;
208                 break;
209         case MC_CG_ARB_FREQ_F1:
210                 mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
211                 mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
212                 burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
213                          MC_ARB_BURST_TIME__STATE1__SHIFT;
214                 break;
215         default:
216                 return -EINVAL;
217         }
218
219         switch (arb_freq_dest) {
220         case MC_CG_ARB_FREQ_F0:
221                 WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
222                 WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
223                 WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
224                         ~MC_ARB_BURST_TIME__STATE0_MASK);
225                 break;
226         case MC_CG_ARB_FREQ_F1:
227                 WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
228                 WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
229                 WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
230                         ~MC_ARB_BURST_TIME__STATE1_MASK);
231                 break;
232         default:
233                 return -EINVAL;
234         }
235
236         mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
237         WREG32(mmMC_CG_CONFIG, mc_cg_config);
238         WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
239                 ~MC_ARB_CG__CG_ARB_REQ_MASK);
240
241         return 0;
242 }
243
244 static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
245 {
246         u8 mc_para_index;
247
248         if (memory_clock < 10000)
249                 mc_para_index = 0;
250         else if (memory_clock >= 80000)
251                 mc_para_index = 0x0f;
252         else
253                 mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
254         return mc_para_index;
255 }
256
257 static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
258 {
259         u8 mc_para_index;
260
261         if (strobe_mode) {
262                 if (memory_clock < 12500)
263                         mc_para_index = 0x00;
264                 else if (memory_clock > 47500)
265                         mc_para_index = 0x0f;
266                 else
267                         mc_para_index = (u8)((memory_clock - 10000) / 2500);
268         } else {
269                 if (memory_clock < 65000)
270                         mc_para_index = 0x00;
271                 else if (memory_clock > 135000)
272                         mc_para_index = 0x0f;
273                 else
274                         mc_para_index = (u8)((memory_clock - 60000) / 5000);
275         }
276         return mc_para_index;
277 }
278
279 static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
280                                                      u32 max_voltage_steps,
281                                                      struct atom_voltage_table *voltage_table)
282 {
283         unsigned int i, diff;
284
285         if (voltage_table->count <= max_voltage_steps)
286                 return;
287
288         diff = voltage_table->count - max_voltage_steps;
289
290         for (i = 0; i < max_voltage_steps; i++)
291                 voltage_table->entries[i] = voltage_table->entries[i + diff];
292
293         voltage_table->count = max_voltage_steps;
294 }
295
296 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
297                                          struct atom_voltage_table_entry *voltage_table,
298                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
299 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
300 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
301                                        u32 target_tdp);
302 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
303 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
304 static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
305
306 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
307                                                              PPSMC_Msg msg, u32 parameter);
308 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
309 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
310
311 static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
312 {
313         struct ci_power_info *pi = adev->pm.dpm.priv;
314
315         return pi;
316 }
317
318 static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
319 {
320         struct ci_ps *ps = rps->ps_priv;
321
322         return ps;
323 }
324
325 static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
326 {
327         struct ci_power_info *pi = ci_get_pi(adev);
328
329         switch (adev->pdev->device) {
330         case 0x6649:
331         case 0x6650:
332         case 0x6651:
333         case 0x6658:
334         case 0x665C:
335         case 0x665D:
336         default:
337                 pi->powertune_defaults = &defaults_bonaire_xt;
338                 break;
339         case 0x6640:
340         case 0x6641:
341         case 0x6646:
342         case 0x6647:
343                 pi->powertune_defaults = &defaults_saturn_xt;
344                 break;
345         case 0x67B8:
346         case 0x67B0:
347                 pi->powertune_defaults = &defaults_hawaii_xt;
348                 break;
349         case 0x67BA:
350         case 0x67B1:
351                 pi->powertune_defaults = &defaults_hawaii_pro;
352                 break;
353         case 0x67A0:
354         case 0x67A1:
355         case 0x67A2:
356         case 0x67A8:
357         case 0x67A9:
358         case 0x67AA:
359         case 0x67B9:
360         case 0x67BE:
361                 pi->powertune_defaults = &defaults_bonaire_xt;
362                 break;
363         }
364
365         pi->dte_tj_offset = 0;
366
367         pi->caps_power_containment = true;
368         pi->caps_cac = false;
369         pi->caps_sq_ramping = false;
370         pi->caps_db_ramping = false;
371         pi->caps_td_ramping = false;
372         pi->caps_tcp_ramping = false;
373
374         if (pi->caps_power_containment) {
375                 pi->caps_cac = true;
376                 if (adev->asic_type == CHIP_HAWAII)
377                         pi->enable_bapm_feature = false;
378                 else
379                         pi->enable_bapm_feature = true;
380                 pi->enable_tdc_limit_feature = true;
381                 pi->enable_pkg_pwr_tracking_feature = true;
382         }
383 }
384
385 static u8 ci_convert_to_vid(u16 vddc)
386 {
387         return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
388 }
389
390 static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
391 {
392         struct ci_power_info *pi = ci_get_pi(adev);
393         u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
394         u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
395         u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
396         u32 i;
397
398         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
399                 return -EINVAL;
400         if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
401                 return -EINVAL;
402         if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
403             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
404                 return -EINVAL;
405
406         for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
407                 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
408                         lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
409                         hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
410                         hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
411                 } else {
412                         lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
413                         hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
414                 }
415         }
416         return 0;
417 }
418
419 static int ci_populate_vddc_vid(struct amdgpu_device *adev)
420 {
421         struct ci_power_info *pi = ci_get_pi(adev);
422         u8 *vid = pi->smc_powertune_table.VddCVid;
423         u32 i;
424
425         if (pi->vddc_voltage_table.count > 8)
426                 return -EINVAL;
427
428         for (i = 0; i < pi->vddc_voltage_table.count; i++)
429                 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
430
431         return 0;
432 }
433
434 static int ci_populate_svi_load_line(struct amdgpu_device *adev)
435 {
436         struct ci_power_info *pi = ci_get_pi(adev);
437         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
438
439         pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
440         pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
441         pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
442         pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
443
444         return 0;
445 }
446
447 static int ci_populate_tdc_limit(struct amdgpu_device *adev)
448 {
449         struct ci_power_info *pi = ci_get_pi(adev);
450         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
451         u16 tdc_limit;
452
453         tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
454         pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
455         pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
456                 pt_defaults->tdc_vddc_throttle_release_limit_perc;
457         pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
458
459         return 0;
460 }
461
462 static int ci_populate_dw8(struct amdgpu_device *adev)
463 {
464         struct ci_power_info *pi = ci_get_pi(adev);
465         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
466         int ret;
467
468         ret = amdgpu_ci_read_smc_sram_dword(adev,
469                                      SMU7_FIRMWARE_HEADER_LOCATION +
470                                      offsetof(SMU7_Firmware_Header, PmFuseTable) +
471                                      offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
472                                      (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
473                                      pi->sram_end);
474         if (ret)
475                 return -EINVAL;
476         else
477                 pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
478
479         return 0;
480 }
481
482 static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
483 {
484         struct ci_power_info *pi = ci_get_pi(adev);
485
486         if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
487             (adev->pm.dpm.fan.fan_output_sensitivity == 0))
488                 adev->pm.dpm.fan.fan_output_sensitivity =
489                         adev->pm.dpm.fan.default_fan_output_sensitivity;
490
491         pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
492                 cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);
493
494         return 0;
495 }
496
497 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
498 {
499         struct ci_power_info *pi = ci_get_pi(adev);
500         u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
501         u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
502         int i, min, max;
503
504         min = max = hi_vid[0];
505         for (i = 0; i < 8; i++) {
506                 if (0 != hi_vid[i]) {
507                         if (min > hi_vid[i])
508                                 min = hi_vid[i];
509                         if (max < hi_vid[i])
510                                 max = hi_vid[i];
511                 }
512
513                 if (0 != lo_vid[i]) {
514                         if (min > lo_vid[i])
515                                 min = lo_vid[i];
516                         if (max < lo_vid[i])
517                                 max = lo_vid[i];
518                 }
519         }
520
521         if ((min == 0) || (max == 0))
522                 return -EINVAL;
523         pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
524         pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
525
526         return 0;
527 }
528
529 static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
530 {
531         struct ci_power_info *pi = ci_get_pi(adev);
532         u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
533         u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
534         struct amdgpu_cac_tdp_table *cac_tdp_table =
535                 adev->pm.dpm.dyn_state.cac_tdp_table;
536
537         hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
538         lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
539
540         pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
541         pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
542
543         return 0;
544 }
545
546 static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
547 {
548         struct ci_power_info *pi = ci_get_pi(adev);
549         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
550         SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
551         struct amdgpu_cac_tdp_table *cac_tdp_table =
552                 adev->pm.dpm.dyn_state.cac_tdp_table;
553         struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
554         int i, j, k;
555         const u16 *def1;
556         const u16 *def2;
557
558         dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
559         dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
560
561         dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
562         dpm_table->GpuTjMax =
563                 (u8)(pi->thermal_temp_setting.temperature_high / 1000);
564         dpm_table->GpuTjHyst = 8;
565
566         dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
567
568         if (ppm) {
569                 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
570                 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
571         } else {
572                 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
573                 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
574         }
575
576         dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
577         def1 = pt_defaults->bapmti_r;
578         def2 = pt_defaults->bapmti_rc;
579
580         for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
581                 for (j = 0; j < SMU7_DTE_SOURCES; j++) {
582                         for (k = 0; k < SMU7_DTE_SINKS; k++) {
583                                 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
584                                 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
585                                 def1++;
586                                 def2++;
587                         }
588                 }
589         }
590
591         return 0;
592 }
593
594 static int ci_populate_pm_base(struct amdgpu_device *adev)
595 {
596         struct ci_power_info *pi = ci_get_pi(adev);
597         u32 pm_fuse_table_offset;
598         int ret;
599
600         if (pi->caps_power_containment) {
601                 ret = amdgpu_ci_read_smc_sram_dword(adev,
602                                              SMU7_FIRMWARE_HEADER_LOCATION +
603                                              offsetof(SMU7_Firmware_Header, PmFuseTable),
604                                              &pm_fuse_table_offset, pi->sram_end);
605                 if (ret)
606                         return ret;
607                 ret = ci_populate_bapm_vddc_vid_sidd(adev);
608                 if (ret)
609                         return ret;
610                 ret = ci_populate_vddc_vid(adev);
611                 if (ret)
612                         return ret;
613                 ret = ci_populate_svi_load_line(adev);
614                 if (ret)
615                         return ret;
616                 ret = ci_populate_tdc_limit(adev);
617                 if (ret)
618                         return ret;
619                 ret = ci_populate_dw8(adev);
620                 if (ret)
621                         return ret;
622                 ret = ci_populate_fuzzy_fan(adev);
623                 if (ret)
624                         return ret;
625                 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
626                 if (ret)
627                         return ret;
628                 ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
629                 if (ret)
630                         return ret;
631                 ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
632                                            (u8 *)&pi->smc_powertune_table,
633                                            sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
634                 if (ret)
635                         return ret;
636         }
637
638         return 0;
639 }
640
641 static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
642 {
643         struct ci_power_info *pi = ci_get_pi(adev);
644         u32 data;
645
646         if (pi->caps_sq_ramping) {
647                 data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
648                 if (enable)
649                         data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
650                 else
651                         data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
652                 WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
653         }
654
655         if (pi->caps_db_ramping) {
656                 data = RREG32_DIDT(ixDIDT_DB_CTRL0);
657                 if (enable)
658                         data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
659                 else
660                         data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
661                 WREG32_DIDT(ixDIDT_DB_CTRL0, data);
662         }
663
664         if (pi->caps_td_ramping) {
665                 data = RREG32_DIDT(ixDIDT_TD_CTRL0);
666                 if (enable)
667                         data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
668                 else
669                         data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
670                 WREG32_DIDT(ixDIDT_TD_CTRL0, data);
671         }
672
673         if (pi->caps_tcp_ramping) {
674                 data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
675                 if (enable)
676                         data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
677                 else
678                         data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
679                 WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
680         }
681 }
682
683 static int ci_program_pt_config_registers(struct amdgpu_device *adev,
684                                           const struct ci_pt_config_reg *cac_config_regs)
685 {
686         const struct ci_pt_config_reg *config_regs = cac_config_regs;
687         u32 data;
688         u32 cache = 0;
689
690         if (config_regs == NULL)
691                 return -EINVAL;
692
693         while (config_regs->offset != 0xFFFFFFFF) {
694                 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
695                         cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
696                 } else {
697                         switch (config_regs->type) {
698                         case CISLANDS_CONFIGREG_SMC_IND:
699                                 data = RREG32_SMC(config_regs->offset);
700                                 break;
701                         case CISLANDS_CONFIGREG_DIDT_IND:
702                                 data = RREG32_DIDT(config_regs->offset);
703                                 break;
704                         default:
705                                 data = RREG32(config_regs->offset);
706                                 break;
707                         }
708
709                         data &= ~config_regs->mask;
710                         data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
711                         data |= cache;
712
713                         switch (config_regs->type) {
714                         case CISLANDS_CONFIGREG_SMC_IND:
715                                 WREG32_SMC(config_regs->offset, data);
716                                 break;
717                         case CISLANDS_CONFIGREG_DIDT_IND:
718                                 WREG32_DIDT(config_regs->offset, data);
719                                 break;
720                         default:
721                                 WREG32(config_regs->offset, data);
722                                 break;
723                         }
724                         cache = 0;
725                 }
726                 config_regs++;
727         }
728         return 0;
729 }
730
731 static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
732 {
733         struct ci_power_info *pi = ci_get_pi(adev);
734         int ret;
735
736         if (pi->caps_sq_ramping || pi->caps_db_ramping ||
737             pi->caps_td_ramping || pi->caps_tcp_ramping) {
738                 gfx_v7_0_enter_rlc_safe_mode(adev);
739
740                 if (enable) {
741                         ret = ci_program_pt_config_registers(adev, didt_config_ci);
742                         if (ret) {
743                                 gfx_v7_0_exit_rlc_safe_mode(adev);
744                                 return ret;
745                         }
746                 }
747
748                 ci_do_enable_didt(adev, enable);
749
750                 gfx_v7_0_exit_rlc_safe_mode(adev);
751         }
752
753         return 0;
754 }
755
756 static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
757 {
758         struct ci_power_info *pi = ci_get_pi(adev);
759         PPSMC_Result smc_result;
760         int ret = 0;
761
762         if (enable) {
763                 pi->power_containment_features = 0;
764                 if (pi->caps_power_containment) {
765                         if (pi->enable_bapm_feature) {
766                                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
767                                 if (smc_result != PPSMC_Result_OK)
768                                         ret = -EINVAL;
769                                 else
770                                         pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
771                         }
772
773                         if (pi->enable_tdc_limit_feature) {
774                                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
775                                 if (smc_result != PPSMC_Result_OK)
776                                         ret = -EINVAL;
777                                 else
778                                         pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
779                         }
780
781                         if (pi->enable_pkg_pwr_tracking_feature) {
782                                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
783                                 if (smc_result != PPSMC_Result_OK) {
784                                         ret = -EINVAL;
785                                 } else {
786                                         struct amdgpu_cac_tdp_table *cac_tdp_table =
787                                                 adev->pm.dpm.dyn_state.cac_tdp_table;
788                                         u32 default_pwr_limit =
789                                                 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
790
791                                         pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
792
793                                         ci_set_power_limit(adev, default_pwr_limit);
794                                 }
795                         }
796                 }
797         } else {
798                 if (pi->caps_power_containment && pi->power_containment_features) {
799                         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
800                                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);
801
802                         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
803                                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
804
805                         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
806                                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
807                         pi->power_containment_features = 0;
808                 }
809         }
810
811         return ret;
812 }
813
814 static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
815 {
816         struct ci_power_info *pi = ci_get_pi(adev);
817         PPSMC_Result smc_result;
818         int ret = 0;
819
820         if (pi->caps_cac) {
821                 if (enable) {
822                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
823                         if (smc_result != PPSMC_Result_OK) {
824                                 ret = -EINVAL;
825                                 pi->cac_enabled = false;
826                         } else {
827                                 pi->cac_enabled = true;
828                         }
829                 } else if (pi->cac_enabled) {
830                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
831                         pi->cac_enabled = false;
832                 }
833         }
834
835         return ret;
836 }
837
838 static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
839                                             bool enable)
840 {
841         struct ci_power_info *pi = ci_get_pi(adev);
842         PPSMC_Result smc_result = PPSMC_Result_OK;
843
844         if (pi->thermal_sclk_dpm_enabled) {
845                 if (enable)
846                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
847                 else
848                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
849         }
850
851         if (smc_result == PPSMC_Result_OK)
852                 return 0;
853         else
854                 return -EINVAL;
855 }
856
857 static int ci_power_control_set_level(struct amdgpu_device *adev)
858 {
859         struct ci_power_info *pi = ci_get_pi(adev);
860         struct amdgpu_cac_tdp_table *cac_tdp_table =
861                 adev->pm.dpm.dyn_state.cac_tdp_table;
862         s32 adjust_percent;
863         s32 target_tdp;
864         int ret = 0;
865         bool adjust_polarity = false; /* ??? */
866
867         if (pi->caps_power_containment) {
868                 adjust_percent = adjust_polarity ?
869                         adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
870                 target_tdp = ((100 + adjust_percent) *
871                               (s32)cac_tdp_table->configurable_tdp) / 100;
872
873                 ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
874         }
875
876         return ret;
877 }
878
879 static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
880 {
881         struct ci_power_info *pi = ci_get_pi(adev);
882
883         if (pi->uvd_power_gated == gate)
884                 return;
885
886         pi->uvd_power_gated = gate;
887
888         ci_update_uvd_dpm(adev, gate);
889 }
890
891 static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
892 {
893         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
894         u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
895
896         if (vblank_time < switch_limit)
897                 return true;
898         else
899                 return false;
900
901 }
902
903 static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
904                                         struct amdgpu_ps *rps)
905 {
906         struct ci_ps *ps = ci_get_ps(rps);
907         struct ci_power_info *pi = ci_get_pi(adev);
908         struct amdgpu_clock_and_voltage_limits *max_limits;
909         bool disable_mclk_switching;
910         u32 sclk, mclk;
911         int i;
912
913         if (rps->vce_active) {
914                 rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
915                 rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
916         } else {
917                 rps->evclk = 0;
918                 rps->ecclk = 0;
919         }
920
921         if ((adev->pm.dpm.new_active_crtc_count > 1) ||
922             ci_dpm_vblank_too_short(adev))
923                 disable_mclk_switching = true;
924         else
925                 disable_mclk_switching = false;
926
927         if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
928                 pi->battery_state = true;
929         else
930                 pi->battery_state = false;
931
932         if (adev->pm.dpm.ac_power)
933                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
934         else
935                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
936
937         if (adev->pm.dpm.ac_power == false) {
938                 for (i = 0; i < ps->performance_level_count; i++) {
939                         if (ps->performance_levels[i].mclk > max_limits->mclk)
940                                 ps->performance_levels[i].mclk = max_limits->mclk;
941                         if (ps->performance_levels[i].sclk > max_limits->sclk)
942                                 ps->performance_levels[i].sclk = max_limits->sclk;
943                 }
944         }
945
946         /* XXX validate the min clocks required for display */
947
948         if (disable_mclk_switching) {
949                 mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
950                 sclk = ps->performance_levels[0].sclk;
951         } else {
952                 mclk = ps->performance_levels[0].mclk;
953                 sclk = ps->performance_levels[0].sclk;
954         }
955
956         if (rps->vce_active) {
957                 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
958                         sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
959                 if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
960                         mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
961         }
962
963         ps->performance_levels[0].sclk = sclk;
964         ps->performance_levels[0].mclk = mclk;
965
966         if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
967                 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
968
969         if (disable_mclk_switching) {
970                 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
971                         ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
972         } else {
973                 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
974                         ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
975         }
976 }
977
978 static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
979                                             int min_temp, int max_temp)
980 {
981         int low_temp = 0 * 1000;
982         int high_temp = 255 * 1000;
983         u32 tmp;
984
985         if (low_temp < min_temp)
986                 low_temp = min_temp;
987         if (high_temp > max_temp)
988                 high_temp = max_temp;
989         if (high_temp < low_temp) {
990                 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
991                 return -EINVAL;
992         }
993
994         tmp = RREG32_SMC(ixCG_THERMAL_INT);
995         tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
996         tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
997                 ((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
998         WREG32_SMC(ixCG_THERMAL_INT, tmp);
999
1000 #if 0
1001         /* XXX: need to figure out how to handle this properly */
1002         tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1003         tmp &= DIG_THERM_DPM_MASK;
1004         tmp |= DIG_THERM_DPM(high_temp / 1000);
1005         WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1006 #endif
1007
1008         adev->pm.dpm.thermal.min_temp = low_temp;
1009         adev->pm.dpm.thermal.max_temp = high_temp;
1010         return 0;
1011 }
1012
1013 static int ci_thermal_enable_alert(struct amdgpu_device *adev,
1014                                    bool enable)
1015 {
1016         u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
1017         PPSMC_Result result;
1018
1019         if (enable) {
1020                 thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1021                                  CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
1022                 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1023                 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
1024                 if (result != PPSMC_Result_OK) {
1025                         DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
1026                         return -EINVAL;
1027                 }
1028         } else {
1029                 thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1030                         CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
1031                 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1032                 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
1033                 if (result != PPSMC_Result_OK) {
1034                         DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
1035                         return -EINVAL;
1036                 }
1037         }
1038
1039         return 0;
1040 }
1041
1042 static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
1043 {
1044         struct ci_power_info *pi = ci_get_pi(adev);
1045         u32 tmp;
1046
1047         if (pi->fan_ctrl_is_in_default_mode) {
1048                 tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
1049                         >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1050                 pi->fan_ctrl_default_mode = tmp;
1051                 tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
1052                         >> CG_FDO_CTRL2__TMIN__SHIFT;
1053                 pi->t_min = tmp;
1054                 pi->fan_ctrl_is_in_default_mode = false;
1055         }
1056
1057         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1058         tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
1059         WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1060
1061         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1062         tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1063         WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1064 }
1065
1066 static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
1067 {
1068         struct ci_power_info *pi = ci_get_pi(adev);
1069         SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
1070         u32 duty100;
1071         u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
1072         u16 fdo_min, slope1, slope2;
1073         u32 reference_clock, tmp;
1074         int ret;
1075         u64 tmp64;
1076
1077         if (!pi->fan_table_start) {
1078                 adev->pm.dpm.fan.ucode_fan_control = false;
1079                 return 0;
1080         }
1081
1082         duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1083                 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1084
1085         if (duty100 == 0) {
1086                 adev->pm.dpm.fan.ucode_fan_control = false;
1087                 return 0;
1088         }
1089
1090         tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
1091         do_div(tmp64, 10000);
1092         fdo_min = (u16)tmp64;
1093
1094         t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
1095         t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
1096
1097         pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
1098         pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
1099
1100         slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
1101         slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
1102
1103         fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
1104         fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
1105         fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
1106
1107         fan_table.Slope1 = cpu_to_be16(slope1);
1108         fan_table.Slope2 = cpu_to_be16(slope2);
1109
1110         fan_table.FdoMin = cpu_to_be16(fdo_min);
1111
1112         fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
1113
1114         fan_table.HystUp = cpu_to_be16(1);
1115
1116         fan_table.HystSlope = cpu_to_be16(1);
1117
1118         fan_table.TempRespLim = cpu_to_be16(5);
1119
1120         reference_clock = amdgpu_asic_get_xclk(adev);
1121
1122         fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
1123                                                reference_clock) / 1600);
1124
1125         fan_table.FdoMax = cpu_to_be16((u16)duty100);
1126
1127         tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
1128                 >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
1129         fan_table.TempSrc = (uint8_t)tmp;
1130
1131         ret = amdgpu_ci_copy_bytes_to_smc(adev,
1132                                           pi->fan_table_start,
1133                                           (u8 *)(&fan_table),
1134                                           sizeof(fan_table),
1135                                           pi->sram_end);
1136
1137         if (ret) {
1138                 DRM_ERROR("Failed to load fan table to the SMC.");
1139                 adev->pm.dpm.fan.ucode_fan_control = false;
1140         }
1141
1142         return 0;
1143 }
1144
1145 static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
1146 {
1147         struct ci_power_info *pi = ci_get_pi(adev);
1148         PPSMC_Result ret;
1149
1150         if (pi->caps_od_fuzzy_fan_control_support) {
1151                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1152                                                                PPSMC_StartFanControl,
1153                                                                FAN_CONTROL_FUZZY);
1154                 if (ret != PPSMC_Result_OK)
1155                         return -EINVAL;
1156                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1157                                                                PPSMC_MSG_SetFanPwmMax,
1158                                                                adev->pm.dpm.fan.default_max_fan_pwm);
1159                 if (ret != PPSMC_Result_OK)
1160                         return -EINVAL;
1161         } else {
1162                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1163                                                                PPSMC_StartFanControl,
1164                                                                FAN_CONTROL_TABLE);
1165                 if (ret != PPSMC_Result_OK)
1166                         return -EINVAL;
1167         }
1168
1169         pi->fan_is_controlled_by_smc = true;
1170         return 0;
1171 }
1172
1173
1174 static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
1175 {
1176         PPSMC_Result ret;
1177         struct ci_power_info *pi = ci_get_pi(adev);
1178
1179         ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
1180         if (ret == PPSMC_Result_OK) {
1181                 pi->fan_is_controlled_by_smc = false;
1182                 return 0;
1183         } else {
1184                 return -EINVAL;
1185         }
1186 }
1187
1188 static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
1189                                         u32 *speed)
1190 {
1191         u32 duty, duty100;
1192         u64 tmp64;
1193
1194         if (adev->pm.no_fan)
1195                 return -ENOENT;
1196
1197         duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1198                 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1199         duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
1200                 >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
1201
1202         if (duty100 == 0)
1203                 return -EINVAL;
1204
1205         tmp64 = (u64)duty * 100;
1206         do_div(tmp64, duty100);
1207         *speed = (u32)tmp64;
1208
1209         if (*speed > 100)
1210                 *speed = 100;
1211
1212         return 0;
1213 }
1214
1215 static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
1216                                         u32 speed)
1217 {
1218         u32 tmp;
1219         u32 duty, duty100;
1220         u64 tmp64;
1221         struct ci_power_info *pi = ci_get_pi(adev);
1222
1223         if (adev->pm.no_fan)
1224                 return -ENOENT;
1225
1226         if (pi->fan_is_controlled_by_smc)
1227                 return -EINVAL;
1228
1229         if (speed > 100)
1230                 return -EINVAL;
1231
1232         duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1233                 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1234
1235         if (duty100 == 0)
1236                 return -EINVAL;
1237
1238         tmp64 = (u64)speed * duty100;
1239         do_div(tmp64, 100);
1240         duty = (u32)tmp64;
1241
1242         tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
1243         tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
1244         WREG32_SMC(ixCG_FDO_CTRL0, tmp);
1245
1246         return 0;
1247 }
1248
1249 static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
1250 {
1251         if (mode) {
1252                 /* stop auto-manage */
1253                 if (adev->pm.dpm.fan.ucode_fan_control)
1254                         ci_fan_ctrl_stop_smc_fan_control(adev);
1255                 ci_fan_ctrl_set_static_mode(adev, mode);
1256         } else {
1257                 /* restart auto-manage */
1258                 if (adev->pm.dpm.fan.ucode_fan_control)
1259                         ci_thermal_start_smc_fan_control(adev);
1260                 else
1261                         ci_fan_ctrl_set_default_mode(adev);
1262         }
1263 }
1264
1265 static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
1266 {
1267         struct ci_power_info *pi = ci_get_pi(adev);
1268         u32 tmp;
1269
1270         if (pi->fan_is_controlled_by_smc)
1271                 return 0;
1272
1273         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1274         return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
1275 }
1276
1277 #if 0
1278 static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
1279                                          u32 *speed)
1280 {
1281         u32 tach_period;
1282         u32 xclk = amdgpu_asic_get_xclk(adev);
1283
1284         if (adev->pm.no_fan)
1285                 return -ENOENT;
1286
1287         if (adev->pm.fan_pulses_per_revolution == 0)
1288                 return -ENOENT;
1289
1290         tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
1291                 >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
1292         if (tach_period == 0)
1293                 return -ENOENT;
1294
1295         *speed = 60 * xclk * 10000 / tach_period;
1296
1297         return 0;
1298 }
1299
1300 static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
1301                                          u32 speed)
1302 {
1303         u32 tach_period, tmp;
1304         u32 xclk = amdgpu_asic_get_xclk(adev);
1305
1306         if (adev->pm.no_fan)
1307                 return -ENOENT;
1308
1309         if (adev->pm.fan_pulses_per_revolution == 0)
1310                 return -ENOENT;
1311
1312         if ((speed < adev->pm.fan_min_rpm) ||
1313             (speed > adev->pm.fan_max_rpm))
1314                 return -EINVAL;
1315
1316         if (adev->pm.dpm.fan.ucode_fan_control)
1317                 ci_fan_ctrl_stop_smc_fan_control(adev);
1318
1319         tach_period = 60 * xclk * 10000 / (8 * speed);
1320         tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
1321         tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
1322         WREG32_SMC(CG_TACH_CTRL, tmp);
1323
1324         ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1325
1326         return 0;
1327 }
1328 #endif
1329
1330 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1331 {
1332         struct ci_power_info *pi = ci_get_pi(adev);
1333         u32 tmp;
1334
1335         if (!pi->fan_ctrl_is_in_default_mode) {
1336                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1337                 tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1338                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1339
1340                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1341                 tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1342                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1343                 pi->fan_ctrl_is_in_default_mode = true;
1344         }
1345 }
1346
1347 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1348 {
1349         if (adev->pm.dpm.fan.ucode_fan_control) {
1350                 ci_fan_ctrl_start_smc_fan_control(adev);
1351                 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1352         }
1353 }
1354
1355 static void ci_thermal_initialize(struct amdgpu_device *adev)
1356 {
1357         u32 tmp;
1358
1359         if (adev->pm.fan_pulses_per_revolution) {
1360                 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1361                 tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1362                         << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1363                 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1364         }
1365
1366         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1367         tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1368         WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1369 }
1370
1371 static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1372 {
1373         int ret;
1374
1375         ci_thermal_initialize(adev);
1376         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1377         if (ret)
1378                 return ret;
1379         ret = ci_thermal_enable_alert(adev, true);
1380         if (ret)
1381                 return ret;
1382         if (adev->pm.dpm.fan.ucode_fan_control) {
1383                 ret = ci_thermal_setup_fan_table(adev);
1384                 if (ret)
1385                         return ret;
1386                 ci_thermal_start_smc_fan_control(adev);
1387         }
1388
1389         return 0;
1390 }
1391
1392 static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1393 {
1394         if (!adev->pm.no_fan)
1395                 ci_fan_ctrl_set_default_mode(adev);
1396 }
1397
1398 #if 0
1399 static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1400                                      u16 reg_offset, u32 *value)
1401 {
1402         struct ci_power_info *pi = ci_get_pi(adev);
1403
1404         return amdgpu_ci_read_smc_sram_dword(adev,
1405                                       pi->soft_regs_start + reg_offset,
1406                                       value, pi->sram_end);
1407 }
1408 #endif
1409
1410 static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1411                                       u16 reg_offset, u32 value)
1412 {
1413         struct ci_power_info *pi = ci_get_pi(adev);
1414
1415         return amdgpu_ci_write_smc_sram_dword(adev,
1416                                        pi->soft_regs_start + reg_offset,
1417                                        value, pi->sram_end);
1418 }
1419
1420 static void ci_init_fps_limits(struct amdgpu_device *adev)
1421 {
1422         struct ci_power_info *pi = ci_get_pi(adev);
1423         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1424
1425         if (pi->caps_fps) {
1426                 u16 tmp;
1427
1428                 tmp = 45;
1429                 table->FpsHighT = cpu_to_be16(tmp);
1430
1431                 tmp = 30;
1432                 table->FpsLowT = cpu_to_be16(tmp);
1433         }
1434 }
1435
1436 static int ci_update_sclk_t(struct amdgpu_device *adev)
1437 {
1438         struct ci_power_info *pi = ci_get_pi(adev);
1439         int ret = 0;
1440         u32 low_sclk_interrupt_t = 0;
1441
1442         if (pi->caps_sclk_throttle_low_notification) {
1443                 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1444
1445                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1446                                            pi->dpm_table_start +
1447                                            offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1448                                            (u8 *)&low_sclk_interrupt_t,
1449                                            sizeof(u32), pi->sram_end);
1450
1451         }
1452
1453         return ret;
1454 }
1455
1456 static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1457 {
1458         struct ci_power_info *pi = ci_get_pi(adev);
1459         u16 leakage_id, virtual_voltage_id;
1460         u16 vddc, vddci;
1461         int i;
1462
1463         pi->vddc_leakage.count = 0;
1464         pi->vddci_leakage.count = 0;
1465
1466         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1467                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1468                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1469                         if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1470                                 continue;
1471                         if (vddc != 0 && vddc != virtual_voltage_id) {
1472                                 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1473                                 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1474                                 pi->vddc_leakage.count++;
1475                         }
1476                 }
1477         } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1478                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1479                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1480                         if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1481                                                                                      virtual_voltage_id,
1482                                                                                      leakage_id) == 0) {
1483                                 if (vddc != 0 && vddc != virtual_voltage_id) {
1484                                         pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1485                                         pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1486                                         pi->vddc_leakage.count++;
1487                                 }
1488                                 if (vddci != 0 && vddci != virtual_voltage_id) {
1489                                         pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1490                                         pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1491                                         pi->vddci_leakage.count++;
1492                                 }
1493                         }
1494                 }
1495         }
1496 }
1497
1498 static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1499 {
1500         struct ci_power_info *pi = ci_get_pi(adev);
1501         bool want_thermal_protection;
1502         enum amdgpu_dpm_event_src dpm_event_src;
1503         u32 tmp;
1504
1505         switch (sources) {
1506         case 0:
1507         default:
1508                 want_thermal_protection = false;
1509                 break;
1510         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1511                 want_thermal_protection = true;
1512                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1513                 break;
1514         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1515                 want_thermal_protection = true;
1516                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1517                 break;
1518         case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1519               (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1520                 want_thermal_protection = true;
1521                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1522                 break;
1523         }
1524
1525         if (want_thermal_protection) {
1526 #if 0
1527                 /* XXX: need to figure out how to handle this properly */
1528                 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1529                 tmp &= DPM_EVENT_SRC_MASK;
1530                 tmp |= DPM_EVENT_SRC(dpm_event_src);
1531                 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1532 #endif
1533
1534                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1535                 if (pi->thermal_protection)
1536                         tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1537                 else
1538                         tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1539                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1540         } else {
1541                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1542                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1543                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1544         }
1545 }
1546
1547 static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1548                                            enum amdgpu_dpm_auto_throttle_src source,
1549                                            bool enable)
1550 {
1551         struct ci_power_info *pi = ci_get_pi(adev);
1552
1553         if (enable) {
1554                 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1555                         pi->active_auto_throttle_sources |= 1 << source;
1556                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1557                 }
1558         } else {
1559                 if (pi->active_auto_throttle_sources & (1 << source)) {
1560                         pi->active_auto_throttle_sources &= ~(1 << source);
1561                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1562                 }
1563         }
1564 }
1565
1566 static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1567 {
1568         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1569                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1570 }
1571
1572 static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1573 {
1574         struct ci_power_info *pi = ci_get_pi(adev);
1575         PPSMC_Result smc_result;
1576
1577         if (!pi->need_update_smu7_dpm_table)
1578                 return 0;
1579
1580         if ((!pi->sclk_dpm_key_disabled) &&
1581             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1582                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1583                 if (smc_result != PPSMC_Result_OK)
1584                         return -EINVAL;
1585         }
1586
1587         if ((!pi->mclk_dpm_key_disabled) &&
1588             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1589                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1590                 if (smc_result != PPSMC_Result_OK)
1591                         return -EINVAL;
1592         }
1593
1594         pi->need_update_smu7_dpm_table = 0;
1595         return 0;
1596 }
1597
1598 static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1599 {
1600         struct ci_power_info *pi = ci_get_pi(adev);
1601         PPSMC_Result smc_result;
1602
1603         if (enable) {
1604                 if (!pi->sclk_dpm_key_disabled) {
1605                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1606                         if (smc_result != PPSMC_Result_OK)
1607                                 return -EINVAL;
1608                 }
1609
1610                 if (!pi->mclk_dpm_key_disabled) {
1611                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1612                         if (smc_result != PPSMC_Result_OK)
1613                                 return -EINVAL;
1614
1615                         WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1616                                         ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1617
1618                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1619                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1620                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1621
1622                         udelay(10);
1623
1624                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1625                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1626                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1627                 }
1628         } else {
1629                 if (!pi->sclk_dpm_key_disabled) {
1630                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1631                         if (smc_result != PPSMC_Result_OK)
1632                                 return -EINVAL;
1633                 }
1634
1635                 if (!pi->mclk_dpm_key_disabled) {
1636                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1637                         if (smc_result != PPSMC_Result_OK)
1638                                 return -EINVAL;
1639                 }
1640         }
1641
1642         return 0;
1643 }
1644
1645 static int ci_start_dpm(struct amdgpu_device *adev)
1646 {
1647         struct ci_power_info *pi = ci_get_pi(adev);
1648         PPSMC_Result smc_result;
1649         int ret;
1650         u32 tmp;
1651
1652         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1653         tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1654         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1655
1656         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1657         tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1658         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1659
1660         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1661
1662         WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1663
1664         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1665         if (smc_result != PPSMC_Result_OK)
1666                 return -EINVAL;
1667
1668         ret = ci_enable_sclk_mclk_dpm(adev, true);
1669         if (ret)
1670                 return ret;
1671
1672         if (!pi->pcie_dpm_key_disabled) {
1673                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1674                 if (smc_result != PPSMC_Result_OK)
1675                         return -EINVAL;
1676         }
1677
1678         return 0;
1679 }
1680
1681 static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1682 {
1683         struct ci_power_info *pi = ci_get_pi(adev);
1684         PPSMC_Result smc_result;
1685
1686         if (!pi->need_update_smu7_dpm_table)
1687                 return 0;
1688
1689         if ((!pi->sclk_dpm_key_disabled) &&
1690             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1691                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1692                 if (smc_result != PPSMC_Result_OK)
1693                         return -EINVAL;
1694         }
1695
1696         if ((!pi->mclk_dpm_key_disabled) &&
1697             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1698                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1699                 if (smc_result != PPSMC_Result_OK)
1700                         return -EINVAL;
1701         }
1702
1703         return 0;
1704 }
1705
1706 static int ci_stop_dpm(struct amdgpu_device *adev)
1707 {
1708         struct ci_power_info *pi = ci_get_pi(adev);
1709         PPSMC_Result smc_result;
1710         int ret;
1711         u32 tmp;
1712
1713         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1714         tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1715         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1716
1717         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1718         tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1719         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1720
1721         if (!pi->pcie_dpm_key_disabled) {
1722                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1723                 if (smc_result != PPSMC_Result_OK)
1724                         return -EINVAL;
1725         }
1726
1727         ret = ci_enable_sclk_mclk_dpm(adev, false);
1728         if (ret)
1729                 return ret;
1730
1731         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1732         if (smc_result != PPSMC_Result_OK)
1733                 return -EINVAL;
1734
1735         return 0;
1736 }
1737
1738 static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1739 {
1740         u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1741
1742         if (enable)
1743                 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1744         else
1745                 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1746         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1747 }
1748
1749 #if 0
1750 static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1751                                         bool ac_power)
1752 {
1753         struct ci_power_info *pi = ci_get_pi(adev);
1754         struct amdgpu_cac_tdp_table *cac_tdp_table =
1755                 adev->pm.dpm.dyn_state.cac_tdp_table;
1756         u32 power_limit;
1757
1758         if (ac_power)
1759                 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1760         else
1761                 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1762
1763         ci_set_power_limit(adev, power_limit);
1764
1765         if (pi->caps_automatic_dc_transition) {
1766                 if (ac_power)
1767                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1768                 else
1769                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1770         }
1771
1772         return 0;
1773 }
1774 #endif
1775
1776 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1777                                                       PPSMC_Msg msg, u32 parameter)
1778 {
1779         WREG32(mmSMC_MSG_ARG_0, parameter);
1780         return amdgpu_ci_send_msg_to_smc(adev, msg);
1781 }
1782
1783 static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1784                                                         PPSMC_Msg msg, u32 *parameter)
1785 {
1786         PPSMC_Result smc_result;
1787
1788         smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1789
1790         if ((smc_result == PPSMC_Result_OK) && parameter)
1791                 *parameter = RREG32(mmSMC_MSG_ARG_0);
1792
1793         return smc_result;
1794 }
1795
1796 static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1797 {
1798         struct ci_power_info *pi = ci_get_pi(adev);
1799
1800         if (!pi->sclk_dpm_key_disabled) {
1801                 PPSMC_Result smc_result =
1802                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1803                 if (smc_result != PPSMC_Result_OK)
1804                         return -EINVAL;
1805         }
1806
1807         return 0;
1808 }
1809
1810 static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1811 {
1812         struct ci_power_info *pi = ci_get_pi(adev);
1813
1814         if (!pi->mclk_dpm_key_disabled) {
1815                 PPSMC_Result smc_result =
1816                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1817                 if (smc_result != PPSMC_Result_OK)
1818                         return -EINVAL;
1819         }
1820
1821         return 0;
1822 }
1823
1824 static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1825 {
1826         struct ci_power_info *pi = ci_get_pi(adev);
1827
1828         if (!pi->pcie_dpm_key_disabled) {
1829                 PPSMC_Result smc_result =
1830                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1831                 if (smc_result != PPSMC_Result_OK)
1832                         return -EINVAL;
1833         }
1834
1835         return 0;
1836 }
1837
1838 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1839 {
1840         struct ci_power_info *pi = ci_get_pi(adev);
1841
1842         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1843                 PPSMC_Result smc_result =
1844                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1845                 if (smc_result != PPSMC_Result_OK)
1846                         return -EINVAL;
1847         }
1848
1849         return 0;
1850 }
1851
1852 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1853                                        u32 target_tdp)
1854 {
1855         PPSMC_Result smc_result =
1856                 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1857         if (smc_result != PPSMC_Result_OK)
1858                 return -EINVAL;
1859         return 0;
1860 }
1861
1862 #if 0
1863 static int ci_set_boot_state(struct amdgpu_device *adev)
1864 {
1865         return ci_enable_sclk_mclk_dpm(adev, false);
1866 }
1867 #endif
1868
1869 static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1870 {
1871         u32 sclk_freq;
1872         PPSMC_Result smc_result =
1873                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1874                                                     PPSMC_MSG_API_GetSclkFrequency,
1875                                                     &sclk_freq);
1876         if (smc_result != PPSMC_Result_OK)
1877                 sclk_freq = 0;
1878
1879         return sclk_freq;
1880 }
1881
1882 static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1883 {
1884         u32 mclk_freq;
1885         PPSMC_Result smc_result =
1886                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1887                                                     PPSMC_MSG_API_GetMclkFrequency,
1888                                                     &mclk_freq);
1889         if (smc_result != PPSMC_Result_OK)
1890                 mclk_freq = 0;
1891
1892         return mclk_freq;
1893 }
1894
1895 static void ci_dpm_start_smc(struct amdgpu_device *adev)
1896 {
1897         int i;
1898
1899         amdgpu_ci_program_jump_on_start(adev);
1900         amdgpu_ci_start_smc_clock(adev);
1901         amdgpu_ci_start_smc(adev);
1902         for (i = 0; i < adev->usec_timeout; i++) {
1903                 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1904                         break;
1905         }
1906 }
1907
1908 static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1909 {
1910         amdgpu_ci_reset_smc(adev);
1911         amdgpu_ci_stop_smc_clock(adev);
1912 }
1913
1914 static int ci_process_firmware_header(struct amdgpu_device *adev)
1915 {
1916         struct ci_power_info *pi = ci_get_pi(adev);
1917         u32 tmp;
1918         int ret;
1919
1920         ret = amdgpu_ci_read_smc_sram_dword(adev,
1921                                      SMU7_FIRMWARE_HEADER_LOCATION +
1922                                      offsetof(SMU7_Firmware_Header, DpmTable),
1923                                      &tmp, pi->sram_end);
1924         if (ret)
1925                 return ret;
1926
1927         pi->dpm_table_start = tmp;
1928
1929         ret = amdgpu_ci_read_smc_sram_dword(adev,
1930                                      SMU7_FIRMWARE_HEADER_LOCATION +
1931                                      offsetof(SMU7_Firmware_Header, SoftRegisters),
1932                                      &tmp, pi->sram_end);
1933         if (ret)
1934                 return ret;
1935
1936         pi->soft_regs_start = tmp;
1937
1938         ret = amdgpu_ci_read_smc_sram_dword(adev,
1939                                      SMU7_FIRMWARE_HEADER_LOCATION +
1940                                      offsetof(SMU7_Firmware_Header, mcRegisterTable),
1941                                      &tmp, pi->sram_end);
1942         if (ret)
1943                 return ret;
1944
1945         pi->mc_reg_table_start = tmp;
1946
1947         ret = amdgpu_ci_read_smc_sram_dword(adev,
1948                                      SMU7_FIRMWARE_HEADER_LOCATION +
1949                                      offsetof(SMU7_Firmware_Header, FanTable),
1950                                      &tmp, pi->sram_end);
1951         if (ret)
1952                 return ret;
1953
1954         pi->fan_table_start = tmp;
1955
1956         ret = amdgpu_ci_read_smc_sram_dword(adev,
1957                                      SMU7_FIRMWARE_HEADER_LOCATION +
1958                                      offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1959                                      &tmp, pi->sram_end);
1960         if (ret)
1961                 return ret;
1962
1963         pi->arb_table_start = tmp;
1964
1965         return 0;
1966 }
1967
1968 static void ci_read_clock_registers(struct amdgpu_device *adev)
1969 {
1970         struct ci_power_info *pi = ci_get_pi(adev);
1971
1972         pi->clock_registers.cg_spll_func_cntl =
1973                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
1974         pi->clock_registers.cg_spll_func_cntl_2 =
1975                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
1976         pi->clock_registers.cg_spll_func_cntl_3 =
1977                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
1978         pi->clock_registers.cg_spll_func_cntl_4 =
1979                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
1980         pi->clock_registers.cg_spll_spread_spectrum =
1981                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
1982         pi->clock_registers.cg_spll_spread_spectrum_2 =
1983                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
1984         pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
1985         pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
1986         pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
1987         pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
1988         pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
1989         pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
1990         pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
1991         pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
1992         pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
1993 }
1994
1995 static void ci_init_sclk_t(struct amdgpu_device *adev)
1996 {
1997         struct ci_power_info *pi = ci_get_pi(adev);
1998
1999         pi->low_sclk_interrupt_t = 0;
2000 }
2001
2002 static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2003                                          bool enable)
2004 {
2005         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2006
2007         if (enable)
2008                 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2009         else
2010                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2011         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2012 }
2013
2014 static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2015 {
2016         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2017
2018         tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2019
2020         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2021 }
2022
2023 #if 0
2024 static int ci_enter_ulp_state(struct amdgpu_device *adev)
2025 {
2026
2027         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2028
2029         udelay(25000);
2030
2031         return 0;
2032 }
2033
2034 static int ci_exit_ulp_state(struct amdgpu_device *adev)
2035 {
2036         int i;
2037
2038         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2039
2040         udelay(7000);
2041
2042         for (i = 0; i < adev->usec_timeout; i++) {
2043                 if (RREG32(mmSMC_RESP_0) == 1)
2044                         break;
2045                 udelay(1000);
2046         }
2047
2048         return 0;
2049 }
2050 #endif
2051
2052 static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2053                                         bool has_display)
2054 {
2055         PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2056
2057         return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
2058 }
2059
2060 static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2061                                       bool enable)
2062 {
2063         struct ci_power_info *pi = ci_get_pi(adev);
2064
2065         if (enable) {
2066                 if (pi->caps_sclk_ds) {
2067                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2068                                 return -EINVAL;
2069                 } else {
2070                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2071                                 return -EINVAL;
2072                 }
2073         } else {
2074                 if (pi->caps_sclk_ds) {
2075                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2076                                 return -EINVAL;
2077                 }
2078         }
2079
2080         return 0;
2081 }
2082
2083 static void ci_program_display_gap(struct amdgpu_device *adev)
2084 {
2085         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2086         u32 pre_vbi_time_in_us;
2087         u32 frame_time_in_us;
2088         u32 ref_clock = adev->clock.spll.reference_freq;
2089         u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2090         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2091
2092         tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2093         if (adev->pm.dpm.new_active_crtc_count > 0)
2094                 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2095         else
2096                 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2097         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2098
2099         if (refresh_rate == 0)
2100                 refresh_rate = 60;
2101         if (vblank_time == 0xffffffff)
2102                 vblank_time = 500;
2103         frame_time_in_us = 1000000 / refresh_rate;
2104         pre_vbi_time_in_us =
2105                 frame_time_in_us - 200 - vblank_time;
2106         tmp = pre_vbi_time_in_us * (ref_clock / 100);
2107
2108         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2109         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2110         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2111
2112
2113         ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2114
2115 }
2116
2117 static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2118 {
2119         struct ci_power_info *pi = ci_get_pi(adev);
2120         u32 tmp;
2121
2122         if (enable) {
2123                 if (pi->caps_sclk_ss_support) {
2124                         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2125                         tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2126                         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2127                 }
2128         } else {
2129                 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2130                 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2131                 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2132
2133                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2134                 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2135                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2136         }
2137 }
2138
2139 static void ci_program_sstp(struct amdgpu_device *adev)
2140 {
2141         WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2142         ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2143          (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2144 }
2145
2146 static void ci_enable_display_gap(struct amdgpu_device *adev)
2147 {
2148         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2149
2150         tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2151                         CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2152         tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2153                 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2154
2155         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2156 }
2157
2158 static void ci_program_vc(struct amdgpu_device *adev)
2159 {
2160         u32 tmp;
2161
2162         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2163         tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2164         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2165
2166         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2167         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2168         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2169         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2170         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2171         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2172         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2173         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2174 }
2175
2176 static void ci_clear_vc(struct amdgpu_device *adev)
2177 {
2178         u32 tmp;
2179
2180         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2181         tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2182         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2183
2184         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2185         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2186         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2187         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2188         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2189         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2190         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2191         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2192 }
2193
2194 static int ci_upload_firmware(struct amdgpu_device *adev)
2195 {
2196         struct ci_power_info *pi = ci_get_pi(adev);
2197         int i, ret;
2198
2199         for (i = 0; i < adev->usec_timeout; i++) {
2200                 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2201                         break;
2202         }
2203         WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2204
2205         amdgpu_ci_stop_smc_clock(adev);
2206         amdgpu_ci_reset_smc(adev);
2207
2208         ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
2209
2210         return ret;
2211
2212 }
2213
2214 static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2215                                      struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2216                                      struct atom_voltage_table *voltage_table)
2217 {
2218         u32 i;
2219
2220         if (voltage_dependency_table == NULL)
2221                 return -EINVAL;
2222
2223         voltage_table->mask_low = 0;
2224         voltage_table->phase_delay = 0;
2225
2226         voltage_table->count = voltage_dependency_table->count;
2227         for (i = 0; i < voltage_table->count; i++) {
2228                 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2229                 voltage_table->entries[i].smio_low = 0;
2230         }
2231
2232         return 0;
2233 }
2234
2235 static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2236 {
2237         struct ci_power_info *pi = ci_get_pi(adev);
2238         int ret;
2239
2240         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2241                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2242                                                         VOLTAGE_OBJ_GPIO_LUT,
2243                                                         &pi->vddc_voltage_table);
2244                 if (ret)
2245                         return ret;
2246         } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2247                 ret = ci_get_svi2_voltage_table(adev,
2248                                                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2249                                                 &pi->vddc_voltage_table);
2250                 if (ret)
2251                         return ret;
2252         }
2253
2254         if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2255                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2256                                                          &pi->vddc_voltage_table);
2257
2258         if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2259                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2260                                                         VOLTAGE_OBJ_GPIO_LUT,
2261                                                         &pi->vddci_voltage_table);
2262                 if (ret)
2263                         return ret;
2264         } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2265                 ret = ci_get_svi2_voltage_table(adev,
2266                                                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2267                                                 &pi->vddci_voltage_table);
2268                 if (ret)
2269                         return ret;
2270         }
2271
2272         if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2273                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2274                                                          &pi->vddci_voltage_table);
2275
2276         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2277                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2278                                                         VOLTAGE_OBJ_GPIO_LUT,
2279                                                         &pi->mvdd_voltage_table);
2280                 if (ret)
2281                         return ret;
2282         } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2283                 ret = ci_get_svi2_voltage_table(adev,
2284                                                 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2285                                                 &pi->mvdd_voltage_table);
2286                 if (ret)
2287                         return ret;
2288         }
2289
2290         if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2291                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2292                                                          &pi->mvdd_voltage_table);
2293
2294         return 0;
2295 }
2296
2297 static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2298                                           struct atom_voltage_table_entry *voltage_table,
2299                                           SMU7_Discrete_VoltageLevel *smc_voltage_table)
2300 {
2301         int ret;
2302
2303         ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2304                                             &smc_voltage_table->StdVoltageHiSidd,
2305                                             &smc_voltage_table->StdVoltageLoSidd);
2306
2307         if (ret) {
2308                 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2309                 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2310         }
2311
2312         smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2313         smc_voltage_table->StdVoltageHiSidd =
2314                 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2315         smc_voltage_table->StdVoltageLoSidd =
2316                 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2317 }
2318
2319 static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2320                                       SMU7_Discrete_DpmTable *table)
2321 {
2322         struct ci_power_info *pi = ci_get_pi(adev);
2323         unsigned int count;
2324
2325         table->VddcLevelCount = pi->vddc_voltage_table.count;
2326         for (count = 0; count < table->VddcLevelCount; count++) {
2327                 ci_populate_smc_voltage_table(adev,
2328                                               &pi->vddc_voltage_table.entries[count],
2329                                               &table->VddcLevel[count]);
2330
2331                 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2332                         table->VddcLevel[count].Smio |=
2333                                 pi->vddc_voltage_table.entries[count].smio_low;
2334                 else
2335                         table->VddcLevel[count].Smio = 0;
2336         }
2337         table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2338
2339         return 0;
2340 }
2341
2342 static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2343                                        SMU7_Discrete_DpmTable *table)
2344 {
2345         unsigned int count;
2346         struct ci_power_info *pi = ci_get_pi(adev);
2347
2348         table->VddciLevelCount = pi->vddci_voltage_table.count;
2349         for (count = 0; count < table->VddciLevelCount; count++) {
2350                 ci_populate_smc_voltage_table(adev,
2351                                               &pi->vddci_voltage_table.entries[count],
2352                                               &table->VddciLevel[count]);
2353
2354                 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2355                         table->VddciLevel[count].Smio |=
2356                                 pi->vddci_voltage_table.entries[count].smio_low;
2357                 else
2358                         table->VddciLevel[count].Smio = 0;
2359         }
2360         table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2361
2362         return 0;
2363 }
2364
2365 static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2366                                       SMU7_Discrete_DpmTable *table)
2367 {
2368         struct ci_power_info *pi = ci_get_pi(adev);
2369         unsigned int count;
2370
2371         table->MvddLevelCount = pi->mvdd_voltage_table.count;
2372         for (count = 0; count < table->MvddLevelCount; count++) {
2373                 ci_populate_smc_voltage_table(adev,
2374                                               &pi->mvdd_voltage_table.entries[count],
2375                                               &table->MvddLevel[count]);
2376
2377                 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2378                         table->MvddLevel[count].Smio |=
2379                                 pi->mvdd_voltage_table.entries[count].smio_low;
2380                 else
2381                         table->MvddLevel[count].Smio = 0;
2382         }
2383         table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2384
2385         return 0;
2386 }
2387
2388 static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2389                                           SMU7_Discrete_DpmTable *table)
2390 {
2391         int ret;
2392
2393         ret = ci_populate_smc_vddc_table(adev, table);
2394         if (ret)
2395                 return ret;
2396
2397         ret = ci_populate_smc_vddci_table(adev, table);
2398         if (ret)
2399                 return ret;
2400
2401         ret = ci_populate_smc_mvdd_table(adev, table);
2402         if (ret)
2403                 return ret;
2404
2405         return 0;
2406 }
2407
2408 static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2409                                   SMU7_Discrete_VoltageLevel *voltage)
2410 {
2411         struct ci_power_info *pi = ci_get_pi(adev);
2412         u32 i = 0;
2413
2414         if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2415                 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2416                         if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2417                                 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2418                                 break;
2419                         }
2420                 }
2421
2422                 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2423                         return -EINVAL;
2424         }
2425
2426         return -EINVAL;
2427 }
2428
2429 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2430                                          struct atom_voltage_table_entry *voltage_table,
2431                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2432 {
2433         u16 v_index, idx;
2434         bool voltage_found = false;
2435         *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2436         *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2437
2438         if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2439                 return -EINVAL;
2440
2441         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2442                 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2443                         if (voltage_table->value ==
2444                             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2445                                 voltage_found = true;
2446                                 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2447                                         idx = v_index;
2448                                 else
2449                                         idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2450                                 *std_voltage_lo_sidd =
2451                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2452                                 *std_voltage_hi_sidd =
2453                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2454                                 break;
2455                         }
2456                 }
2457
2458                 if (!voltage_found) {
2459                         for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2460                                 if (voltage_table->value <=
2461                                     adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2462                                         voltage_found = true;
2463                                         if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2464                                                 idx = v_index;
2465                                         else
2466                                                 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2467                                         *std_voltage_lo_sidd =
2468                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2469                                         *std_voltage_hi_sidd =
2470                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2471                                         break;
2472                                 }
2473                         }
2474                 }
2475         }
2476
2477         return 0;
2478 }
2479
2480 static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2481                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2482                                                   u32 sclk,
2483                                                   u32 *phase_shedding)
2484 {
2485         unsigned int i;
2486
2487         *phase_shedding = 1;
2488
2489         for (i = 0; i < limits->count; i++) {
2490                 if (sclk < limits->entries[i].sclk) {
2491                         *phase_shedding = i;
2492                         break;
2493                 }
2494         }
2495 }
2496
2497 static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2498                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2499                                                   u32 mclk,
2500                                                   u32 *phase_shedding)
2501 {
2502         unsigned int i;
2503
2504         *phase_shedding = 1;
2505
2506         for (i = 0; i < limits->count; i++) {
2507                 if (mclk < limits->entries[i].mclk) {
2508                         *phase_shedding = i;
2509                         break;
2510                 }
2511         }
2512 }
2513
2514 static int ci_init_arb_table_index(struct amdgpu_device *adev)
2515 {
2516         struct ci_power_info *pi = ci_get_pi(adev);
2517         u32 tmp;
2518         int ret;
2519
2520         ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2521                                      &tmp, pi->sram_end);
2522         if (ret)
2523                 return ret;
2524
2525         tmp &= 0x00FFFFFF;
2526         tmp |= MC_CG_ARB_FREQ_F1 << 24;
2527
2528         return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2529                                        tmp, pi->sram_end);
2530 }
2531
2532 static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2533                                          struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2534                                          u32 clock, u32 *voltage)
2535 {
2536         u32 i = 0;
2537
2538         if (allowed_clock_voltage_table->count == 0)
2539                 return -EINVAL;
2540
2541         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2542                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2543                         *voltage = allowed_clock_voltage_table->entries[i].v;
2544                         return 0;
2545                 }
2546         }
2547
2548         *voltage = allowed_clock_voltage_table->entries[i-1].v;
2549
2550         return 0;
2551 }
2552
2553 static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
2554                                              u32 sclk, u32 min_sclk_in_sr)
2555 {
2556         u32 i;
2557         u32 tmp;
2558         u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2559                 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2560
2561         if (sclk < min)
2562                 return 0;
2563
2564         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2565                 tmp = sclk / (1 << i);
2566                 if (tmp >= min || i == 0)
2567                         break;
2568         }
2569
2570         return (u8)i;
2571 }
2572
2573 static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2574 {
2575         return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2576 }
2577
2578 static int ci_reset_to_default(struct amdgpu_device *adev)
2579 {
2580         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2581                 0 : -EINVAL;
2582 }
2583
2584 static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2585 {
2586         u32 tmp;
2587
2588         tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2589
2590         if (tmp == MC_CG_ARB_FREQ_F0)
2591                 return 0;
2592
2593         return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2594 }
2595
2596 static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2597                                         const u32 engine_clock,
2598                                         const u32 memory_clock,
2599                                         u32 *dram_timimg2)
2600 {
2601         bool patch;
2602         u32 tmp, tmp2;
2603
2604         tmp = RREG32(mmMC_SEQ_MISC0);
2605         patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2606
2607         if (patch &&
2608             ((adev->pdev->device == 0x67B0) ||
2609              (adev->pdev->device == 0x67B1))) {
2610                 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2611                         tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2612                         *dram_timimg2 &= ~0x00ff0000;
2613                         *dram_timimg2 |= tmp2 << 16;
2614                 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2615                         tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2616                         *dram_timimg2 &= ~0x00ff0000;
2617                         *dram_timimg2 |= tmp2 << 16;
2618                 }
2619         }
2620 }
2621
2622 static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2623                                                 u32 sclk,
2624                                                 u32 mclk,
2625                                                 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2626 {
2627         u32 dram_timing;
2628         u32 dram_timing2;
2629         u32 burst_time;
2630
2631         amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2632
2633         dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
2634         dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2635         burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2636
2637         ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2638
2639         arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2640         arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2641         arb_regs->McArbBurstTime = (u8)burst_time;
2642
2643         return 0;
2644 }
2645
2646 static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2647 {
2648         struct ci_power_info *pi = ci_get_pi(adev);
2649         SMU7_Discrete_MCArbDramTimingTable arb_regs;
2650         u32 i, j;
2651         int ret =  0;
2652
2653         memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2654
2655         for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2656                 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2657                         ret = ci_populate_memory_timing_parameters(adev,
2658                                                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2659                                                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2660                                                                    &arb_regs.entries[i][j]);
2661                         if (ret)
2662                                 break;
2663                 }
2664         }
2665
2666         if (ret == 0)
2667                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2668                                            pi->arb_table_start,
2669                                            (u8 *)&arb_regs,
2670                                            sizeof(SMU7_Discrete_MCArbDramTimingTable),
2671                                            pi->sram_end);
2672
2673         return ret;
2674 }
2675
2676 static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2677 {
2678         struct ci_power_info *pi = ci_get_pi(adev);
2679
2680         if (pi->need_update_smu7_dpm_table == 0)
2681                 return 0;
2682
2683         return ci_do_program_memory_timing_parameters(adev);
2684 }
2685
2686 static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2687                                           struct amdgpu_ps *amdgpu_boot_state)
2688 {
2689         struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2690         struct ci_power_info *pi = ci_get_pi(adev);
2691         u32 level = 0;
2692
2693         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2694                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2695                     boot_state->performance_levels[0].sclk) {
2696                         pi->smc_state_table.GraphicsBootLevel = level;
2697                         break;
2698                 }
2699         }
2700
2701         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2702                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2703                     boot_state->performance_levels[0].mclk) {
2704                         pi->smc_state_table.MemoryBootLevel = level;
2705                         break;
2706                 }
2707         }
2708 }
2709
2710 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2711 {
2712         u32 i;
2713         u32 mask_value = 0;
2714
2715         for (i = dpm_table->count; i > 0; i--) {
2716                 mask_value = mask_value << 1;
2717                 if (dpm_table->dpm_levels[i-1].enabled)
2718                         mask_value |= 0x1;
2719                 else
2720                         mask_value &= 0xFFFFFFFE;
2721         }
2722
2723         return mask_value;
2724 }
2725
2726 static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2727                                        SMU7_Discrete_DpmTable *table)
2728 {
2729         struct ci_power_info *pi = ci_get_pi(adev);
2730         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2731         u32 i;
2732
2733         for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2734                 table->LinkLevel[i].PcieGenSpeed =
2735                         (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2736                 table->LinkLevel[i].PcieLaneCount =
2737                         amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2738                 table->LinkLevel[i].EnabledForActivity = 1;
2739                 table->LinkLevel[i].DownT = cpu_to_be32(5);
2740                 table->LinkLevel[i].UpT = cpu_to_be32(30);
2741         }
2742
2743         pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2744         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2745                 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2746 }
2747
2748 static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2749                                      SMU7_Discrete_DpmTable *table)
2750 {
2751         u32 count;
2752         struct atom_clock_dividers dividers;
2753         int ret = -EINVAL;
2754
2755         table->UvdLevelCount =
2756                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2757
2758         for (count = 0; count < table->UvdLevelCount; count++) {
2759                 table->UvdLevel[count].VclkFrequency =
2760                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2761                 table->UvdLevel[count].DclkFrequency =
2762                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2763                 table->UvdLevel[count].MinVddc =
2764                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2765                 table->UvdLevel[count].MinVddcPhases = 1;
2766
2767                 ret = amdgpu_atombios_get_clock_dividers(adev,
2768                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2769                                                          table->UvdLevel[count].VclkFrequency, false, &dividers);
2770                 if (ret)
2771                         return ret;
2772
2773                 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2774
2775                 ret = amdgpu_atombios_get_clock_dividers(adev,
2776                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2777                                                          table->UvdLevel[count].DclkFrequency, false, &dividers);
2778                 if (ret)
2779                         return ret;
2780
2781                 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2782
2783                 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2784                 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2785                 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2786         }
2787
2788         return ret;
2789 }
2790
2791 static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2792                                      SMU7_Discrete_DpmTable *table)
2793 {
2794         u32 count;
2795         struct atom_clock_dividers dividers;
2796         int ret = -EINVAL;
2797
2798         table->VceLevelCount =
2799                 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2800
2801         for (count = 0; count < table->VceLevelCount; count++) {
2802                 table->VceLevel[count].Frequency =
2803                         adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2804                 table->VceLevel[count].MinVoltage =
2805                         (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2806                 table->VceLevel[count].MinPhases = 1;
2807
2808                 ret = amdgpu_atombios_get_clock_dividers(adev,
2809                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2810                                                          table->VceLevel[count].Frequency, false, &dividers);
2811                 if (ret)
2812                         return ret;
2813
2814                 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2815
2816                 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2817                 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2818         }
2819
2820         return ret;
2821
2822 }
2823
2824 static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2825                                      SMU7_Discrete_DpmTable *table)
2826 {
2827         u32 count;
2828         struct atom_clock_dividers dividers;
2829         int ret = -EINVAL;
2830
2831         table->AcpLevelCount = (u8)
2832                 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2833
2834         for (count = 0; count < table->AcpLevelCount; count++) {
2835                 table->AcpLevel[count].Frequency =
2836                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2837                 table->AcpLevel[count].MinVoltage =
2838                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2839                 table->AcpLevel[count].MinPhases = 1;
2840
2841                 ret = amdgpu_atombios_get_clock_dividers(adev,
2842                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2843                                                          table->AcpLevel[count].Frequency, false, &dividers);
2844                 if (ret)
2845                         return ret;
2846
2847                 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2848
2849                 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2850                 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2851         }
2852
2853         return ret;
2854 }
2855
2856 static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2857                                       SMU7_Discrete_DpmTable *table)
2858 {
2859         u32 count;
2860         struct atom_clock_dividers dividers;
2861         int ret = -EINVAL;
2862
2863         table->SamuLevelCount =
2864                 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2865
2866         for (count = 0; count < table->SamuLevelCount; count++) {
2867                 table->SamuLevel[count].Frequency =
2868                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2869                 table->SamuLevel[count].MinVoltage =
2870                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2871                 table->SamuLevel[count].MinPhases = 1;
2872
2873                 ret = amdgpu_atombios_get_clock_dividers(adev,
2874                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2875                                                          table->SamuLevel[count].Frequency, false, &dividers);
2876                 if (ret)
2877                         return ret;
2878
2879                 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2880
2881                 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2882                 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2883         }
2884
2885         return ret;
2886 }
2887
2888 static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2889                                     u32 memory_clock,
2890                                     SMU7_Discrete_MemoryLevel *mclk,
2891                                     bool strobe_mode,
2892                                     bool dll_state_on)
2893 {
2894         struct ci_power_info *pi = ci_get_pi(adev);
2895         u32  dll_cntl = pi->clock_registers.dll_cntl;
2896         u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2897         u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2898         u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2899         u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2900         u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2901         u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2902         u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2903         u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2904         struct atom_mpll_param mpll_param;
2905         int ret;
2906
2907         ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2908         if (ret)
2909                 return ret;
2910
2911         mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2912         mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2913
2914         mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2915                         MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2916         mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
2917                 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2918                 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2919
2920         mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2921         mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2922
2923         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2924                 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2925                                 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2926                 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2927                                 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2928         }
2929
2930         if (pi->caps_mclk_ss_support) {
2931                 struct amdgpu_atom_ss ss;
2932                 u32 freq_nom;
2933                 u32 tmp;
2934                 u32 reference_clock = adev->clock.mpll.reference_freq;
2935
2936                 if (mpll_param.qdr == 1)
2937                         freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2938                 else
2939                         freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2940
2941                 tmp = (freq_nom / reference_clock);
2942                 tmp = tmp * tmp;
2943                 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2944                                                      ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2945                         u32 clks = reference_clock * 5 / ss.rate;
2946                         u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2947
2948                         mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2949                         mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2950
2951                         mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2952                         mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2953                 }
2954         }
2955
2956         mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2957         mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2958
2959         if (dll_state_on)
2960                 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2961                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2962         else
2963                 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2964                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
2965
2966         mclk->MclkFrequency = memory_clock;
2967         mclk->MpllFuncCntl = mpll_func_cntl;
2968         mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2969         mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2970         mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2971         mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2972         mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2973         mclk->DllCntl = dll_cntl;
2974         mclk->MpllSs1 = mpll_ss1;
2975         mclk->MpllSs2 = mpll_ss2;
2976
2977         return 0;
2978 }
2979
2980 static int ci_populate_single_memory_level(struct amdgpu_device *adev,
2981                                            u32 memory_clock,
2982                                            SMU7_Discrete_MemoryLevel *memory_level)
2983 {
2984         struct ci_power_info *pi = ci_get_pi(adev);
2985         int ret;
2986         bool dll_state_on;
2987
2988         if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2989                 ret = ci_get_dependency_volt_by_clk(adev,
2990                                                     &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2991                                                     memory_clock, &memory_level->MinVddc);
2992                 if (ret)
2993                         return ret;
2994         }
2995
2996         if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2997                 ret = ci_get_dependency_volt_by_clk(adev,
2998                                                     &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2999                                                     memory_clock, &memory_level->MinVddci);
3000                 if (ret)
3001                         return ret;
3002         }
3003
3004         if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3005                 ret = ci_get_dependency_volt_by_clk(adev,
3006                                                     &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3007                                                     memory_clock, &memory_level->MinMvdd);
3008                 if (ret)
3009                         return ret;
3010         }
3011
3012         memory_level->MinVddcPhases = 1;
3013
3014         if (pi->vddc_phase_shed_control)
3015                 ci_populate_phase_value_based_on_mclk(adev,
3016                                                       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3017                                                       memory_clock,
3018                                                       &memory_level->MinVddcPhases);
3019
3020         memory_level->EnabledForThrottle = 1;
3021         memory_level->EnabledForActivity = 1;
3022         memory_level->UpH = 0;
3023         memory_level->DownH = 100;
3024         memory_level->VoltageDownH = 0;
3025         memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3026
3027         memory_level->StutterEnable = false;
3028         memory_level->StrobeEnable = false;
3029         memory_level->EdcReadEnable = false;
3030         memory_level->EdcWriteEnable = false;
3031         memory_level->RttEnable = false;
3032
3033         memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3034
3035         if (pi->mclk_stutter_mode_threshold &&
3036             (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3037             (pi->uvd_enabled == false) &&
3038             (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3039             (adev->pm.dpm.new_active_crtc_count <= 2))
3040                 memory_level->StutterEnable = true;
3041
3042         if (pi->mclk_strobe_mode_threshold &&
3043             (memory_clock <= pi->mclk_strobe_mode_threshold))
3044                 memory_level->StrobeEnable = 1;
3045
3046         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3047                 memory_level->StrobeRatio =
3048                         ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3049                 if (pi->mclk_edc_enable_threshold &&
3050                     (memory_clock > pi->mclk_edc_enable_threshold))
3051                         memory_level->EdcReadEnable = true;
3052
3053                 if (pi->mclk_edc_wr_enable_threshold &&
3054                     (memory_clock > pi->mclk_edc_wr_enable_threshold))
3055                         memory_level->EdcWriteEnable = true;
3056
3057                 if (memory_level->StrobeEnable) {
3058                         if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3059                             ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3060                                 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
3061                         else
3062                                 dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
3063                 } else {
3064                         dll_state_on = pi->dll_default_on;
3065                 }
3066         } else {
3067                 memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3068                 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
3069         }
3070
3071         ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3072         if (ret)
3073                 return ret;
3074
3075         memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3076         memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3077         memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3078         memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3079
3080         memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3081         memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3082         memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3083         memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3084         memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3085         memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3086         memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3087         memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3088         memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3089         memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3090         memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3091
3092         return 0;
3093 }
3094
3095 static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3096                                       SMU7_Discrete_DpmTable *table)
3097 {
3098         struct ci_power_info *pi = ci_get_pi(adev);
3099         struct atom_clock_dividers dividers;
3100         SMU7_Discrete_VoltageLevel voltage_level;
3101         u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3102         u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3103         u32 dll_cntl = pi->clock_registers.dll_cntl;
3104         u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3105         int ret;
3106
3107         table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3108
3109         if (pi->acpi_vddc)
3110                 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3111         else
3112                 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3113
3114         table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3115
3116         table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3117
3118         ret = amdgpu_atombios_get_clock_dividers(adev,
3119                                                  COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3120                                                  table->ACPILevel.SclkFrequency, false, &dividers);
3121         if (ret)
3122                 return ret;
3123
3124         table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3125         table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3126         table->ACPILevel.DeepSleepDivId = 0;
3127
3128         spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3129         spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3130
3131         spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3132         spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3133
3134         table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3135         table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3136         table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3137         table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3138         table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3139         table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3140         table->ACPILevel.CcPwrDynRm = 0;
3141         table->ACPILevel.CcPwrDynRm1 = 0;
3142
3143         table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3144         table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3145         table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3146         table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3147         table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3148         table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3149         table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3150         table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3151         table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3152         table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3153         table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3154
3155         table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3156         table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3157
3158         if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3159                 if (pi->acpi_vddci)
3160                         table->MemoryACPILevel.MinVddci =
3161                                 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3162                 else
3163                         table->MemoryACPILevel.MinVddci =
3164                                 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3165         }
3166
3167         if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3168                 table->MemoryACPILevel.MinMvdd = 0;
3169         else
3170                 table->MemoryACPILevel.MinMvdd =
3171                         cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3172
3173         mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3174                 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3175         mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3176                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3177
3178         dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3179
3180         table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3181         table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3182         table->MemoryACPILevel.MpllAdFuncCntl =
3183                 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3184         table->MemoryACPILevel.MpllDqFuncCntl =
3185                 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3186         table->MemoryACPILevel.MpllFuncCntl =
3187                 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3188         table->MemoryACPILevel.MpllFuncCntl_1 =
3189                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3190         table->MemoryACPILevel.MpllFuncCntl_2 =
3191                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3192         table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3193         table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3194
3195         table->MemoryACPILevel.EnabledForThrottle = 0;
3196         table->MemoryACPILevel.EnabledForActivity = 0;
3197         table->MemoryACPILevel.UpH = 0;
3198         table->MemoryACPILevel.DownH = 100;
3199         table->MemoryACPILevel.VoltageDownH = 0;
3200         table->MemoryACPILevel.ActivityLevel =
3201                 cpu_to_be16((u16)pi->mclk_activity_target);
3202
3203         table->MemoryACPILevel.StutterEnable = false;
3204         table->MemoryACPILevel.StrobeEnable = false;
3205         table->MemoryACPILevel.EdcReadEnable = false;
3206         table->MemoryACPILevel.EdcWriteEnable = false;
3207         table->MemoryACPILevel.RttEnable = false;
3208
3209         return 0;
3210 }
3211
3212
3213 static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3214 {
3215         struct ci_power_info *pi = ci_get_pi(adev);
3216         struct ci_ulv_parm *ulv = &pi->ulv;
3217
3218         if (ulv->supported) {
3219                 if (enable)
3220                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3221                                 0 : -EINVAL;
3222                 else
3223                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3224                                 0 : -EINVAL;
3225         }
3226
3227         return 0;
3228 }
3229
3230 static int ci_populate_ulv_level(struct amdgpu_device *adev,
3231                                  SMU7_Discrete_Ulv *state)
3232 {
3233         struct ci_power_info *pi = ci_get_pi(adev);
3234         u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3235
3236         state->CcPwrDynRm = 0;
3237         state->CcPwrDynRm1 = 0;
3238
3239         if (ulv_voltage == 0) {
3240                 pi->ulv.supported = false;
3241                 return 0;
3242         }
3243
3244         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3245                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3246                         state->VddcOffset = 0;
3247                 else
3248                         state->VddcOffset =
3249                                 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3250         } else {
3251                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3252                         state->VddcOffsetVid = 0;
3253                 else
3254                         state->VddcOffsetVid = (u8)
3255                                 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3256                                  VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3257         }
3258         state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3259
3260         state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3261         state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3262         state->VddcOffset = cpu_to_be16(state->VddcOffset);
3263
3264         return 0;
3265 }
3266
3267 static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3268                                     u32 engine_clock,
3269                                     SMU7_Discrete_GraphicsLevel *sclk)
3270 {
3271         struct ci_power_info *pi = ci_get_pi(adev);
3272         struct atom_clock_dividers dividers;
3273         u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3274         u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3275         u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3276         u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3277         u32 reference_clock = adev->clock.spll.reference_freq;
3278         u32 reference_divider;
3279         u32 fbdiv;
3280         int ret;
3281
3282         ret = amdgpu_atombios_get_clock_dividers(adev,
3283                                                  COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3284                                                  engine_clock, false, &dividers);
3285         if (ret)
3286                 return ret;
3287
3288         reference_divider = 1 + dividers.ref_div;
3289         fbdiv = dividers.fb_div & 0x3FFFFFF;
3290
3291         spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3292         spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3293         spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3294
3295         if (pi->caps_sclk_ss_support) {
3296                 struct amdgpu_atom_ss ss;
3297                 u32 vco_freq = engine_clock * dividers.post_div;
3298
3299                 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3300                                                      ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3301                         u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3302                         u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3303
3304                         cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3305                         cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3306                         cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3307
3308                         cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3309                         cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3310                 }
3311         }
3312
3313         sclk->SclkFrequency = engine_clock;
3314         sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3315         sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3316         sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3317         sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
3318         sclk->SclkDid = (u8)dividers.post_divider;
3319
3320         return 0;
3321 }
3322
3323 static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3324                                             u32 engine_clock,
3325                                             u16 sclk_activity_level_t,
3326                                             SMU7_Discrete_GraphicsLevel *graphic_level)
3327 {
3328         struct ci_power_info *pi = ci_get_pi(adev);
3329         int ret;
3330
3331         ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3332         if (ret)
3333                 return ret;
3334
3335         ret = ci_get_dependency_volt_by_clk(adev,
3336                                             &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3337                                             engine_clock, &graphic_level->MinVddc);
3338         if (ret)
3339                 return ret;
3340
3341         graphic_level->SclkFrequency = engine_clock;
3342
3343         graphic_level->Flags =  0;
3344         graphic_level->MinVddcPhases = 1;
3345
3346         if (pi->vddc_phase_shed_control)
3347                 ci_populate_phase_value_based_on_sclk(adev,
3348                                                       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3349                                                       engine_clock,
3350                                                       &graphic_level->MinVddcPhases);
3351
3352         graphic_level->ActivityLevel = sclk_activity_level_t;
3353
3354         graphic_level->CcPwrDynRm = 0;
3355         graphic_level->CcPwrDynRm1 = 0;
3356         graphic_level->EnabledForThrottle = 1;
3357         graphic_level->UpH = 0;
3358         graphic_level->DownH = 0;
3359         graphic_level->VoltageDownH = 0;
3360         graphic_level->PowerThrottle = 0;
3361
3362         if (pi->caps_sclk_ds)
3363                 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev,
3364                                                                                    engine_clock,
3365                                                                                    CISLAND_MINIMUM_ENGINE_CLOCK);
3366
3367         graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3368
3369         graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3370         graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3371         graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3372         graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3373         graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3374         graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3375         graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3376         graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3377         graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3378         graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3379         graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3380         graphic_level->EnabledForActivity = 1;
3381
3382         return 0;
3383 }
3384
3385 static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3386 {
3387         struct ci_power_info *pi = ci_get_pi(adev);
3388         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3389         u32 level_array_address = pi->dpm_table_start +
3390                 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3391         u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3392                 SMU7_MAX_LEVELS_GRAPHICS;
3393         SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3394         u32 i, ret;
3395
3396         memset(levels, 0, level_array_size);
3397
3398         for (i = 0; i < dpm_table->sclk_table.count; i++) {
3399                 ret = ci_populate_single_graphic_level(adev,
3400                                                        dpm_table->sclk_table.dpm_levels[i].value,
3401                                                        (u16)pi->activity_target[i],
3402                                                        &pi->smc_state_table.GraphicsLevel[i]);
3403                 if (ret)
3404                         return ret;
3405                 if (i > 1)
3406                         pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3407                 if (i == (dpm_table->sclk_table.count - 1))
3408                         pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3409                                 PPSMC_DISPLAY_WATERMARK_HIGH;
3410         }
3411
3412         pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3413         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3414                 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3415
3416         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3417                                    (u8 *)levels, level_array_size,
3418                                    pi->sram_end);
3419         if (ret)
3420                 return ret;
3421
3422         return 0;
3423 }
3424
3425 static int ci_populate_ulv_state(struct amdgpu_device *adev,
3426                                  SMU7_Discrete_Ulv *ulv_level)
3427 {
3428         return ci_populate_ulv_level(adev, ulv_level);
3429 }
3430
3431 static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3432 {
3433         struct ci_power_info *pi = ci_get_pi(adev);
3434         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3435         u32 level_array_address = pi->dpm_table_start +
3436                 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3437         u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3438                 SMU7_MAX_LEVELS_MEMORY;
3439         SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3440         u32 i, ret;
3441
3442         memset(levels, 0, level_array_size);
3443
3444         for (i = 0; i < dpm_table->mclk_table.count; i++) {
3445                 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3446                         return -EINVAL;
3447                 ret = ci_populate_single_memory_level(adev,
3448                                                       dpm_table->mclk_table.dpm_levels[i].value,
3449                                                       &pi->smc_state_table.MemoryLevel[i]);
3450                 if (ret)
3451                         return ret;
3452         }
3453
3454         if ((dpm_table->mclk_table.count >= 2) &&
3455             ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3456                 pi->smc_state_table.MemoryLevel[1].MinVddc =
3457                         pi->smc_state_table.MemoryLevel[0].MinVddc;
3458                 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3459                         pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3460         }
3461
3462         pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3463
3464         pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3465         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3466                 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3467
3468         pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3469                 PPSMC_DISPLAY_WATERMARK_HIGH;
3470
3471         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3472                                    (u8 *)levels, level_array_size,
3473                                    pi->sram_end);
3474         if (ret)
3475                 return ret;
3476
3477         return 0;
3478 }
3479
3480 static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3481                                       struct ci_single_dpm_table* dpm_table,
3482                                       u32 count)
3483 {
3484         u32 i;
3485
3486         dpm_table->count = count;
3487         for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3488                 dpm_table->dpm_levels[i].enabled = false;
3489 }
3490
3491 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3492                                       u32 index, u32 pcie_gen, u32 pcie_lanes)
3493 {
3494         dpm_table->dpm_levels[index].value = pcie_gen;
3495         dpm_table->dpm_levels[index].param1 = pcie_lanes;
3496         dpm_table->dpm_levels[index].enabled = true;
3497 }
3498
3499 static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3500 {
3501         struct ci_power_info *pi = ci_get_pi(adev);
3502
3503         if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3504                 return -EINVAL;
3505
3506         if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3507                 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3508                 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3509         } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3510                 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3511                 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3512         }
3513
3514         ci_reset_single_dpm_table(adev,
3515                                   &pi->dpm_table.pcie_speed_table,
3516                                   SMU7_MAX_LEVELS_LINK);
3517
3518         if (adev->asic_type == CHIP_BONAIRE)
3519                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3520                                           pi->pcie_gen_powersaving.min,
3521                                           pi->pcie_lane_powersaving.max);
3522         else
3523                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3524                                           pi->pcie_gen_powersaving.min,
3525                                           pi->pcie_lane_powersaving.min);
3526         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3527                                   pi->pcie_gen_performance.min,
3528                                   pi->pcie_lane_performance.min);
3529         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3530                                   pi->pcie_gen_powersaving.min,
3531                                   pi->pcie_lane_powersaving.max);
3532         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3533                                   pi->pcie_gen_performance.min,
3534                                   pi->pcie_lane_performance.max);
3535         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3536                                   pi->pcie_gen_powersaving.max,
3537                                   pi->pcie_lane_powersaving.max);
3538         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3539                                   pi->pcie_gen_performance.max,
3540                                   pi->pcie_lane_performance.max);
3541
3542         pi->dpm_table.pcie_speed_table.count = 6;
3543
3544         return 0;
3545 }
3546
3547 static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3548 {
3549         struct ci_power_info *pi = ci_get_pi(adev);
3550         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3551                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3552         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3553                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3554         struct amdgpu_cac_leakage_table *std_voltage_table =
3555                 &adev->pm.dpm.dyn_state.cac_leakage_table;
3556         u32 i;
3557
3558         if (allowed_sclk_vddc_table == NULL)
3559                 return -EINVAL;
3560         if (allowed_sclk_vddc_table->count < 1)
3561                 return -EINVAL;
3562         if (allowed_mclk_table == NULL)
3563                 return -EINVAL;
3564         if (allowed_mclk_table->count < 1)
3565                 return -EINVAL;
3566
3567         memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3568
3569         ci_reset_single_dpm_table(adev,
3570                                   &pi->dpm_table.sclk_table,
3571                                   SMU7_MAX_LEVELS_GRAPHICS);
3572         ci_reset_single_dpm_table(adev,
3573                                   &pi->dpm_table.mclk_table,
3574                                   SMU7_MAX_LEVELS_MEMORY);
3575         ci_reset_single_dpm_table(adev,
3576                                   &pi->dpm_table.vddc_table,
3577                                   SMU7_MAX_LEVELS_VDDC);
3578         ci_reset_single_dpm_table(adev,
3579                                   &pi->dpm_table.vddci_table,
3580                                   SMU7_MAX_LEVELS_VDDCI);
3581         ci_reset_single_dpm_table(adev,
3582                                   &pi->dpm_table.mvdd_table,
3583                                   SMU7_MAX_LEVELS_MVDD);
3584
3585         pi->dpm_table.sclk_table.count = 0;
3586         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3587                 if ((i == 0) ||
3588                     (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3589                      allowed_sclk_vddc_table->entries[i].clk)) {
3590                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3591                                 allowed_sclk_vddc_table->entries[i].clk;
3592                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3593                                 (i == 0) ? true : false;
3594                         pi->dpm_table.sclk_table.count++;
3595                 }
3596         }
3597
3598         pi->dpm_table.mclk_table.count = 0;
3599         for (i = 0; i < allowed_mclk_table->count; i++) {
3600                 if ((i == 0) ||
3601                     (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3602                      allowed_mclk_table->entries[i].clk)) {
3603                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3604                                 allowed_mclk_table->entries[i].clk;
3605                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3606                                 (i == 0) ? true : false;
3607                         pi->dpm_table.mclk_table.count++;
3608                 }
3609         }
3610
3611         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3612                 pi->dpm_table.vddc_table.dpm_levels[i].value =
3613                         allowed_sclk_vddc_table->entries[i].v;
3614                 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3615                         std_voltage_table->entries[i].leakage;
3616                 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3617         }
3618         pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3619
3620         allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3621         if (allowed_mclk_table) {
3622                 for (i = 0; i < allowed_mclk_table->count; i++) {
3623                         pi->dpm_table.vddci_table.dpm_levels[i].value =
3624                                 allowed_mclk_table->entries[i].v;
3625                         pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3626                 }
3627                 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3628         }
3629
3630         allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3631         if (allowed_mclk_table) {
3632                 for (i = 0; i < allowed_mclk_table->count; i++) {
3633                         pi->dpm_table.mvdd_table.dpm_levels[i].value =
3634                                 allowed_mclk_table->entries[i].v;
3635                         pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3636                 }
3637                 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3638         }
3639
3640         ci_setup_default_pcie_tables(adev);
3641
3642         return 0;
3643 }
3644
3645 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3646                               u32 value, u32 *boot_level)
3647 {
3648         u32 i;
3649         int ret = -EINVAL;
3650
3651         for(i = 0; i < table->count; i++) {
3652                 if (value == table->dpm_levels[i].value) {
3653                         *boot_level = i;
3654                         ret = 0;
3655                 }
3656         }
3657
3658         return ret;
3659 }
3660
3661 static int ci_init_smc_table(struct amdgpu_device *adev)
3662 {
3663         struct ci_power_info *pi = ci_get_pi(adev);
3664         struct ci_ulv_parm *ulv = &pi->ulv;
3665         struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3666         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3667         int ret;
3668
3669         ret = ci_setup_default_dpm_tables(adev);
3670         if (ret)
3671                 return ret;
3672
3673         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3674                 ci_populate_smc_voltage_tables(adev, table);
3675
3676         ci_init_fps_limits(adev);
3677
3678         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3679                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3680
3681         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3682                 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3683
3684         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3685                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3686
3687         if (ulv->supported) {
3688                 ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3689                 if (ret)
3690                         return ret;
3691                 WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3692         }
3693
3694         ret = ci_populate_all_graphic_levels(adev);
3695         if (ret)
3696                 return ret;
3697
3698         ret = ci_populate_all_memory_levels(adev);
3699         if (ret)
3700                 return ret;
3701
3702         ci_populate_smc_link_level(adev, table);
3703
3704         ret = ci_populate_smc_acpi_level(adev, table);
3705         if (ret)
3706                 return ret;
3707
3708         ret = ci_populate_smc_vce_level(adev, table);
3709         if (ret)
3710                 return ret;
3711
3712         ret = ci_populate_smc_acp_level(adev, table);
3713         if (ret)
3714                 return ret;
3715
3716         ret = ci_populate_smc_samu_level(adev, table);
3717         if (ret)
3718                 return ret;
3719
3720         ret = ci_do_program_memory_timing_parameters(adev);
3721         if (ret)
3722                 return ret;
3723
3724         ret = ci_populate_smc_uvd_level(adev, table);
3725         if (ret)
3726                 return ret;
3727
3728         table->UvdBootLevel  = 0;
3729         table->VceBootLevel  = 0;
3730         table->AcpBootLevel  = 0;
3731         table->SamuBootLevel  = 0;
3732         table->GraphicsBootLevel  = 0;
3733         table->MemoryBootLevel  = 0;
3734
3735         ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3736                                  pi->vbios_boot_state.sclk_bootup_value,
3737                                  (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3738
3739         ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3740                                  pi->vbios_boot_state.mclk_bootup_value,
3741                                  (u32 *)&pi->smc_state_table.MemoryBootLevel);
3742
3743         table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3744         table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3745         table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3746
3747         ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3748
3749         ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3750         if (ret)
3751                 return ret;
3752
3753         table->UVDInterval = 1;
3754         table->VCEInterval = 1;
3755         table->ACPInterval = 1;
3756         table->SAMUInterval = 1;
3757         table->GraphicsVoltageChangeEnable = 1;
3758         table->GraphicsThermThrottleEnable = 1;
3759         table->GraphicsInterval = 1;
3760         table->VoltageInterval = 1;
3761         table->ThermalInterval = 1;
3762         table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3763                                              CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3764         table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3765                                             CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3766         table->MemoryVoltageChangeEnable = 1;
3767         table->MemoryInterval = 1;
3768         table->VoltageResponseTime = 0;
3769         table->VddcVddciDelta = 4000;
3770         table->PhaseResponseTime = 0;
3771         table->MemoryThermThrottleEnable = 1;
3772         table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3773         table->PCIeGenInterval = 1;
3774         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3775                 table->SVI2Enable  = 1;
3776         else
3777                 table->SVI2Enable  = 0;
3778
3779         table->ThermGpio = 17;
3780         table->SclkStepSize = 0x4000;
3781
3782         table->SystemFlags = cpu_to_be32(table->SystemFlags);
3783         table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3784         table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3785         table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3786         table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3787         table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3788         table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3789         table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3790         table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3791         table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3792         table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3793         table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3794         table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3795         table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3796
3797         ret = amdgpu_ci_copy_bytes_to_smc(adev,
3798                                    pi->dpm_table_start +
3799                                    offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3800                                    (u8 *)&table->SystemFlags,
3801                                    sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3802                                    pi->sram_end);
3803         if (ret)
3804                 return ret;
3805
3806         return 0;
3807 }
3808
3809 static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3810                                       struct ci_single_dpm_table *dpm_table,
3811                                       u32 low_limit, u32 high_limit)
3812 {
3813         u32 i;
3814
3815         for (i = 0; i < dpm_table->count; i++) {
3816                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3817                     (dpm_table->dpm_levels[i].value > high_limit))
3818                         dpm_table->dpm_levels[i].enabled = false;
3819                 else
3820                         dpm_table->dpm_levels[i].enabled = true;
3821         }
3822 }
3823
3824 static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3825                                     u32 speed_low, u32 lanes_low,
3826                                     u32 speed_high, u32 lanes_high)
3827 {
3828         struct ci_power_info *pi = ci_get_pi(adev);
3829         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3830         u32 i, j;
3831
3832         for (i = 0; i < pcie_table->count; i++) {
3833                 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3834                     (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3835                     (pcie_table->dpm_levels[i].value > speed_high) ||
3836                     (pcie_table->dpm_levels[i].param1 > lanes_high))
3837                         pcie_table->dpm_levels[i].enabled = false;
3838                 else
3839                         pcie_table->dpm_levels[i].enabled = true;
3840         }
3841
3842         for (i = 0; i < pcie_table->count; i++) {
3843                 if (pcie_table->dpm_levels[i].enabled) {
3844                         for (j = i + 1; j < pcie_table->count; j++) {
3845                                 if (pcie_table->dpm_levels[j].enabled) {
3846                                         if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3847                                             (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3848                                                 pcie_table->dpm_levels[j].enabled = false;
3849                                 }
3850                         }
3851                 }
3852         }
3853 }
3854
3855 static int ci_trim_dpm_states(struct amdgpu_device *adev,
3856                               struct amdgpu_ps *amdgpu_state)
3857 {
3858         struct ci_ps *state = ci_get_ps(amdgpu_state);
3859         struct ci_power_info *pi = ci_get_pi(adev);
3860         u32 high_limit_count;
3861
3862         if (state->performance_level_count < 1)
3863                 return -EINVAL;
3864
3865         if (state->performance_level_count == 1)
3866                 high_limit_count = 0;
3867         else
3868                 high_limit_count = 1;
3869
3870         ci_trim_single_dpm_states(adev,
3871                                   &pi->dpm_table.sclk_table,
3872                                   state->performance_levels[0].sclk,
3873                                   state->performance_levels[high_limit_count].sclk);
3874
3875         ci_trim_single_dpm_states(adev,
3876                                   &pi->dpm_table.mclk_table,
3877                                   state->performance_levels[0].mclk,
3878                                   state->performance_levels[high_limit_count].mclk);
3879
3880         ci_trim_pcie_dpm_states(adev,
3881                                 state->performance_levels[0].pcie_gen,
3882                                 state->performance_levels[0].pcie_lane,
3883                                 state->performance_levels[high_limit_count].pcie_gen,
3884                                 state->performance_levels[high_limit_count].pcie_lane);
3885
3886         return 0;
3887 }
3888
3889 static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3890 {
3891         struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3892                 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3893         struct amdgpu_clock_voltage_dependency_table *vddc_table =
3894                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3895         u32 requested_voltage = 0;
3896         u32 i;
3897
3898         if (disp_voltage_table == NULL)
3899                 return -EINVAL;
3900         if (!disp_voltage_table->count)
3901                 return -EINVAL;
3902
3903         for (i = 0; i < disp_voltage_table->count; i++) {
3904                 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3905                         requested_voltage = disp_voltage_table->entries[i].v;
3906         }
3907
3908         for (i = 0; i < vddc_table->count; i++) {
3909                 if (requested_voltage <= vddc_table->entries[i].v) {
3910                         requested_voltage = vddc_table->entries[i].v;
3911                         return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3912                                                                   PPSMC_MSG_VddC_Request,
3913                                                                   requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3914                                 0 : -EINVAL;
3915                 }
3916         }
3917
3918         return -EINVAL;
3919 }
3920
3921 static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3922 {
3923         struct ci_power_info *pi = ci_get_pi(adev);
3924         PPSMC_Result result;
3925
3926         ci_apply_disp_minimum_voltage_request(adev);
3927
3928         if (!pi->sclk_dpm_key_disabled) {
3929                 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3930                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3931                                                                    PPSMC_MSG_SCLKDPM_SetEnabledMask,
3932                                                                    pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3933                         if (result != PPSMC_Result_OK)
3934                                 return -EINVAL;
3935                 }
3936         }
3937
3938         if (!pi->mclk_dpm_key_disabled) {
3939                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3940                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3941                                                                    PPSMC_MSG_MCLKDPM_SetEnabledMask,
3942                                                                    pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3943                         if (result != PPSMC_Result_OK)
3944                                 return -EINVAL;
3945                 }
3946         }
3947
3948 #if 0
3949         if (!pi->pcie_dpm_key_disabled) {
3950                 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3951                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3952                                                                    PPSMC_MSG_PCIeDPM_SetEnabledMask,
3953                                                                    pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3954                         if (result != PPSMC_Result_OK)
3955                                 return -EINVAL;
3956                 }
3957         }
3958 #endif
3959
3960         return 0;
3961 }
3962
3963 static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
3964                                                    struct amdgpu_ps *amdgpu_state)
3965 {
3966         struct ci_power_info *pi = ci_get_pi(adev);
3967         struct ci_ps *state = ci_get_ps(amdgpu_state);
3968         struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3969         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3970         struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3971         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3972         u32 i;
3973
3974         pi->need_update_smu7_dpm_table = 0;
3975
3976         for (i = 0; i < sclk_table->count; i++) {
3977                 if (sclk == sclk_table->dpm_levels[i].value)
3978                         break;
3979         }
3980
3981         if (i >= sclk_table->count) {
3982                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3983         } else {
3984                 /* XXX check display min clock requirements */
3985                 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3986                         pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3987         }
3988
3989         for (i = 0; i < mclk_table->count; i++) {
3990                 if (mclk == mclk_table->dpm_levels[i].value)
3991                         break;
3992         }
3993
3994         if (i >= mclk_table->count)
3995                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3996
3997         if (adev->pm.dpm.current_active_crtc_count !=
3998             adev->pm.dpm.new_active_crtc_count)
3999                 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4000 }
4001
4002 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4003                                                        struct amdgpu_ps *amdgpu_state)
4004 {
4005         struct ci_power_info *pi = ci_get_pi(adev);
4006         struct ci_ps *state = ci_get_ps(amdgpu_state);
4007         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4008         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4009         struct ci_dpm_table *dpm_table = &pi->dpm_table;
4010         int ret;
4011
4012         if (!pi->need_update_smu7_dpm_table)
4013                 return 0;
4014
4015         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4016                 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4017
4018         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4019                 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4020
4021         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4022                 ret = ci_populate_all_graphic_levels(adev);
4023                 if (ret)
4024                         return ret;
4025         }
4026
4027         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4028                 ret = ci_populate_all_memory_levels(adev);
4029                 if (ret)
4030                         return ret;
4031         }
4032
4033         return 0;
4034 }
4035
4036 static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4037 {
4038         struct ci_power_info *pi = ci_get_pi(adev);
4039         const struct amdgpu_clock_and_voltage_limits *max_limits;
4040         int i;
4041
4042         if (adev->pm.dpm.ac_power)
4043                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4044         else
4045                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4046
4047         if (enable) {
4048                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4049
4050                 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4051                         if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4052                                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4053
4054                                 if (!pi->caps_uvd_dpm)
4055                                         break;
4056                         }
4057                 }
4058
4059                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4060                                                   PPSMC_MSG_UVDDPM_SetEnabledMask,
4061                                                   pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4062
4063                 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4064                         pi->uvd_enabled = true;
4065                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4066                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4067                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4068                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4069                 }
4070         } else {
4071                 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4072                         pi->uvd_enabled = false;
4073                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4074                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4075                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4076                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4077                 }
4078         }
4079
4080         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4081                                    PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4082                 0 : -EINVAL;
4083 }
4084
4085 static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4086 {
4087         struct ci_power_info *pi = ci_get_pi(adev);
4088         const struct amdgpu_clock_and_voltage_limits *max_limits;
4089         int i;
4090
4091         if (adev->pm.dpm.ac_power)
4092                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4093         else
4094                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4095
4096         if (enable) {
4097                 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4098                 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4099                         if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4100                                 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4101
4102                                 if (!pi->caps_vce_dpm)
4103                                         break;
4104                         }
4105                 }
4106
4107                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4108                                                   PPSMC_MSG_VCEDPM_SetEnabledMask,
4109                                                   pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4110         }
4111
4112         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4113                                    PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4114                 0 : -EINVAL;
4115 }
4116
4117 #if 0
4118 static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4119 {
4120         struct ci_power_info *pi = ci_get_pi(adev);
4121         const struct amdgpu_clock_and_voltage_limits *max_limits;
4122         int i;
4123
4124         if (adev->pm.dpm.ac_power)
4125                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4126         else
4127                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4128
4129         if (enable) {
4130                 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4131                 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4132                         if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4133                                 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4134
4135                                 if (!pi->caps_samu_dpm)
4136                                         break;
4137                         }
4138                 }
4139
4140                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4141                                                   PPSMC_MSG_SAMUDPM_SetEnabledMask,
4142                                                   pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4143         }
4144         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4145                                    PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4146                 0 : -EINVAL;
4147 }
4148
4149 static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4150 {
4151         struct ci_power_info *pi = ci_get_pi(adev);
4152         const struct amdgpu_clock_and_voltage_limits *max_limits;
4153         int i;
4154
4155         if (adev->pm.dpm.ac_power)
4156                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4157         else
4158                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4159
4160         if (enable) {
4161                 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4162                 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4163                         if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4164                                 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4165
4166                                 if (!pi->caps_acp_dpm)
4167                                         break;
4168                         }
4169                 }
4170
4171                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4172                                                   PPSMC_MSG_ACPDPM_SetEnabledMask,
4173                                                   pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4174         }
4175
4176         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4177                                    PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4178                 0 : -EINVAL;
4179 }
4180 #endif
4181
4182 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4183 {
4184         struct ci_power_info *pi = ci_get_pi(adev);
4185         u32 tmp;
4186
4187         if (!gate) {
4188                 if (pi->caps_uvd_dpm ||
4189                     (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4190                         pi->smc_state_table.UvdBootLevel = 0;
4191                 else
4192                         pi->smc_state_table.UvdBootLevel =
4193                                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4194
4195                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4196                 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4197                 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4198                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4199         }
4200
4201         return ci_enable_uvd_dpm(adev, !gate);
4202 }
4203
4204 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4205 {
4206         u8 i;
4207         u32 min_evclk = 30000; /* ??? */
4208         struct amdgpu_vce_clock_voltage_dependency_table *table =
4209                 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4210
4211         for (i = 0; i < table->count; i++) {
4212                 if (table->entries[i].evclk >= min_evclk)
4213                         return i;
4214         }
4215
4216         return table->count - 1;
4217 }
4218
4219 static int ci_update_vce_dpm(struct amdgpu_device *adev,
4220                              struct amdgpu_ps *amdgpu_new_state,
4221                              struct amdgpu_ps *amdgpu_current_state)
4222 {
4223         struct ci_power_info *pi = ci_get_pi(adev);
4224         int ret = 0;
4225         u32 tmp;
4226
4227         if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4228                 if (amdgpu_new_state->evclk) {
4229                         /* turn the clocks on when encoding */
4230                         ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4231                                                             AMD_CG_STATE_UNGATE);
4232                         if (ret)
4233                                 return ret;
4234
4235                         pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4236                         tmp = RREG32_SMC(ixDPM_TABLE_475);
4237                         tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4238                         tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4239                         WREG32_SMC(ixDPM_TABLE_475, tmp);
4240
4241                         ret = ci_enable_vce_dpm(adev, true);
4242                 } else {
4243                         /* turn the clocks off when not encoding */
4244                         ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4245                                                             AMD_CG_STATE_GATE);
4246                         if (ret)
4247                                 return ret;
4248
4249                         ret = ci_enable_vce_dpm(adev, false);
4250                 }
4251         }
4252         return ret;
4253 }
4254
4255 #if 0
4256 static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4257 {
4258         return ci_enable_samu_dpm(adev, gate);
4259 }
4260
4261 static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4262 {
4263         struct ci_power_info *pi = ci_get_pi(adev);
4264         u32 tmp;
4265
4266         if (!gate) {
4267                 pi->smc_state_table.AcpBootLevel = 0;
4268
4269                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4270                 tmp &= ~AcpBootLevel_MASK;
4271                 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4272                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4273         }
4274
4275         return ci_enable_acp_dpm(adev, !gate);
4276 }
4277 #endif
4278
4279 static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4280                                              struct amdgpu_ps *amdgpu_state)
4281 {
4282         struct ci_power_info *pi = ci_get_pi(adev);
4283         int ret;
4284
4285         ret = ci_trim_dpm_states(adev, amdgpu_state);
4286         if (ret)
4287                 return ret;
4288
4289         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4290                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4291         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4292                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4293         pi->last_mclk_dpm_enable_mask =
4294                 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4295         if (pi->uvd_enabled) {
4296                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4297                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4298         }
4299         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4300                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4301
4302         return 0;
4303 }
4304
4305 static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4306                                        u32 level_mask)
4307 {
4308         u32 level = 0;
4309
4310         while ((level_mask & (1 << level)) == 0)
4311                 level++;
4312
4313         return level;
4314 }
4315
4316
4317 static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
4318                                           enum amdgpu_dpm_forced_level level)
4319 {
4320         struct ci_power_info *pi = ci_get_pi(adev);
4321         u32 tmp, levels, i;
4322         int ret;
4323
4324         if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
4325                 if ((!pi->pcie_dpm_key_disabled) &&
4326                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4327                         levels = 0;
4328                         tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4329                         while (tmp >>= 1)
4330                                 levels++;
4331                         if (levels) {
4332                                 ret = ci_dpm_force_state_pcie(adev, level);
4333                                 if (ret)
4334                                         return ret;
4335                                 for (i = 0; i < adev->usec_timeout; i++) {
4336                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4337                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4338                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4339                                         if (tmp == levels)
4340                                                 break;
4341                                         udelay(1);
4342                                 }
4343                         }
4344                 }
4345                 if ((!pi->sclk_dpm_key_disabled) &&
4346                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4347                         levels = 0;
4348                         tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4349                         while (tmp >>= 1)
4350                                 levels++;
4351                         if (levels) {
4352                                 ret = ci_dpm_force_state_sclk(adev, levels);
4353                                 if (ret)
4354                                         return ret;
4355                                 for (i = 0; i < adev->usec_timeout; i++) {
4356                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4357                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4358                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4359                                         if (tmp == levels)
4360                                                 break;
4361                                         udelay(1);
4362                                 }
4363                         }
4364                 }
4365                 if ((!pi->mclk_dpm_key_disabled) &&
4366                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4367                         levels = 0;
4368                         tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4369                         while (tmp >>= 1)
4370                                 levels++;
4371                         if (levels) {
4372                                 ret = ci_dpm_force_state_mclk(adev, levels);
4373                                 if (ret)
4374                                         return ret;
4375                                 for (i = 0; i < adev->usec_timeout; i++) {
4376                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4377                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4378                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4379                                         if (tmp == levels)
4380                                                 break;
4381                                         udelay(1);
4382                                 }
4383                         }
4384                 }
4385                 if ((!pi->pcie_dpm_key_disabled) &&
4386                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4387                         levels = 0;
4388                         tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4389                         while (tmp >>= 1)
4390                                 levels++;
4391                         if (levels) {
4392                                 ret = ci_dpm_force_state_pcie(adev, level);
4393                                 if (ret)
4394                                         return ret;
4395                                 for (i = 0; i < adev->usec_timeout; i++) {
4396                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4397                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4398                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4399                                         if (tmp == levels)
4400                                                 break;
4401                                         udelay(1);
4402                                 }
4403                         }
4404                 }
4405         } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
4406                 if ((!pi->sclk_dpm_key_disabled) &&
4407                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4408                         levels = ci_get_lowest_enabled_level(adev,
4409                                                              pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4410                         ret = ci_dpm_force_state_sclk(adev, levels);
4411                         if (ret)
4412                                 return ret;
4413                         for (i = 0; i < adev->usec_timeout; i++) {
4414                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4415                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4416                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4417                                 if (tmp == levels)
4418                                         break;
4419                                 udelay(1);
4420                         }
4421                 }
4422                 if ((!pi->mclk_dpm_key_disabled) &&
4423                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4424                         levels = ci_get_lowest_enabled_level(adev,
4425                                                              pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4426                         ret = ci_dpm_force_state_mclk(adev, levels);
4427                         if (ret)
4428                                 return ret;
4429                         for (i = 0; i < adev->usec_timeout; i++) {
4430                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4431                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4432                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4433                                 if (tmp == levels)
4434                                         break;
4435                                 udelay(1);
4436                         }
4437                 }
4438                 if ((!pi->pcie_dpm_key_disabled) &&
4439                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4440                         levels = ci_get_lowest_enabled_level(adev,
4441                                                              pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4442                         ret = ci_dpm_force_state_pcie(adev, levels);
4443                         if (ret)
4444                                 return ret;
4445                         for (i = 0; i < adev->usec_timeout; i++) {
4446                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4447                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4448                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4449                                 if (tmp == levels)
4450                                         break;
4451                                 udelay(1);
4452                         }
4453                 }
4454         } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
4455                 if (!pi->pcie_dpm_key_disabled) {
4456                         PPSMC_Result smc_result;
4457
4458                         smc_result = amdgpu_ci_send_msg_to_smc(adev,
4459                                                                PPSMC_MSG_PCIeDPM_UnForceLevel);
4460                         if (smc_result != PPSMC_Result_OK)
4461                                 return -EINVAL;
4462                 }
4463                 ret = ci_upload_dpm_level_enable_mask(adev);
4464                 if (ret)
4465                         return ret;
4466         }
4467
4468         adev->pm.dpm.forced_level = level;
4469
4470         return 0;
4471 }
4472
4473 static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4474                                        struct ci_mc_reg_table *table)
4475 {
4476         u8 i, j, k;
4477         u32 temp_reg;
4478
4479         for (i = 0, j = table->last; i < table->last; i++) {
4480                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4481                         return -EINVAL;
4482                 switch(table->mc_reg_address[i].s1) {
4483                 case mmMC_SEQ_MISC1:
4484                         temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4485                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4486                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4487                         for (k = 0; k < table->num_entries; k++) {
4488                                 table->mc_reg_table_entry[k].mc_data[j] =
4489                                         ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4490                         }
4491                         j++;
4492                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4493                                 return -EINVAL;
4494
4495                         temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4496                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4497                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4498                         for (k = 0; k < table->num_entries; k++) {
4499                                 table->mc_reg_table_entry[k].mc_data[j] =
4500                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4501                                 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4502                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4503                         }
4504                         j++;
4505                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4506                                 return -EINVAL;
4507
4508                         if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4509                                 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4510                                 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4511                                 for (k = 0; k < table->num_entries; k++) {
4512                                         table->mc_reg_table_entry[k].mc_data[j] =
4513                                                 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4514                                 }
4515                                 j++;
4516                                 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4517                                         return -EINVAL;
4518                         }
4519                         break;
4520                 case mmMC_SEQ_RESERVE_M:
4521                         temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4522                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4523                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4524                         for (k = 0; k < table->num_entries; k++) {
4525                                 table->mc_reg_table_entry[k].mc_data[j] =
4526                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4527                         }
4528                         j++;
4529                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4530                                 return -EINVAL;
4531                         break;
4532                 default:
4533                         break;
4534                 }
4535
4536         }
4537
4538         table->last = j;
4539
4540         return 0;
4541 }
4542
4543 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4544 {
4545         bool result = true;
4546
4547         switch(in_reg) {
4548         case mmMC_SEQ_RAS_TIMING:
4549                 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4550                 break;
4551         case mmMC_SEQ_DLL_STBY:
4552                 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4553                 break;
4554         case mmMC_SEQ_G5PDX_CMD0:
4555                 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4556                 break;
4557         case mmMC_SEQ_G5PDX_CMD1:
4558                 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4559                 break;
4560         case mmMC_SEQ_G5PDX_CTRL:
4561                 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4562                 break;
4563         case mmMC_SEQ_CAS_TIMING:
4564                 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4565             break;
4566         case mmMC_SEQ_MISC_TIMING:
4567                 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4568                 break;
4569         case mmMC_SEQ_MISC_TIMING2:
4570                 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4571                 break;
4572         case mmMC_SEQ_PMG_DVS_CMD:
4573                 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4574                 break;
4575         case mmMC_SEQ_PMG_DVS_CTL:
4576                 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4577                 break;
4578         case mmMC_SEQ_RD_CTL_D0:
4579                 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4580                 break;
4581         case mmMC_SEQ_RD_CTL_D1:
4582                 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4583                 break;
4584         case mmMC_SEQ_WR_CTL_D0:
4585                 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4586                 break;
4587         case mmMC_SEQ_WR_CTL_D1:
4588                 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4589                 break;
4590         case mmMC_PMG_CMD_EMRS:
4591                 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4592                 break;
4593         case mmMC_PMG_CMD_MRS:
4594                 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4595                 break;
4596         case mmMC_PMG_CMD_MRS1:
4597                 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4598                 break;
4599         case mmMC_SEQ_PMG_TIMING:
4600                 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4601                 break;
4602         case mmMC_PMG_CMD_MRS2:
4603                 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4604                 break;
4605         case mmMC_SEQ_WR_CTL_2:
4606                 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4607                 break;
4608         default:
4609                 result = false;
4610                 break;
4611         }
4612
4613         return result;
4614 }
4615
4616 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4617 {
4618         u8 i, j;
4619
4620         for (i = 0; i < table->last; i++) {
4621                 for (j = 1; j < table->num_entries; j++) {
4622                         if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4623                             table->mc_reg_table_entry[j].mc_data[i]) {
4624                                 table->valid_flag |= 1 << i;
4625                                 break;
4626                         }
4627                 }
4628         }
4629 }
4630
4631 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4632 {
4633         u32 i;
4634         u16 address;
4635
4636         for (i = 0; i < table->last; i++) {
4637                 table->mc_reg_address[i].s0 =
4638                         ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4639                         address : table->mc_reg_address[i].s1;
4640         }
4641 }
4642
4643 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4644                                       struct ci_mc_reg_table *ci_table)
4645 {
4646         u8 i, j;
4647
4648         if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4649                 return -EINVAL;
4650         if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4651                 return -EINVAL;
4652
4653         for (i = 0; i < table->last; i++)
4654                 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4655
4656         ci_table->last = table->last;
4657
4658         for (i = 0; i < table->num_entries; i++) {
4659                 ci_table->mc_reg_table_entry[i].mclk_max =
4660                         table->mc_reg_table_entry[i].mclk_max;
4661                 for (j = 0; j < table->last; j++)
4662                         ci_table->mc_reg_table_entry[i].mc_data[j] =
4663                                 table->mc_reg_table_entry[i].mc_data[j];
4664         }
4665         ci_table->num_entries = table->num_entries;
4666
4667         return 0;
4668 }
4669
4670 static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4671                                        struct ci_mc_reg_table *table)
4672 {
4673         u8 i, k;
4674         u32 tmp;
4675         bool patch;
4676
4677         tmp = RREG32(mmMC_SEQ_MISC0);
4678         patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
4679
4680         if (patch &&
4681             ((adev->pdev->device == 0x67B0) ||
4682              (adev->pdev->device == 0x67B1))) {
4683                 for (i = 0; i < table->last; i++) {
4684                         if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4685                                 return -EINVAL;
4686                         switch (table->mc_reg_address[i].s1) {
4687                         case mmMC_SEQ_MISC1:
4688                                 for (k = 0; k < table->num_entries; k++) {
4689                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4690                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4691                                                 table->mc_reg_table_entry[k].mc_data[i] =
4692                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4693                                                         0x00000007;
4694                                 }
4695                                 break;
4696                         case mmMC_SEQ_WR_CTL_D0:
4697                                 for (k = 0; k < table->num_entries; k++) {
4698                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4699                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4700                                                 table->mc_reg_table_entry[k].mc_data[i] =
4701                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4702                                                         0x0000D0DD;
4703                                 }
4704                                 break;
4705                         case mmMC_SEQ_WR_CTL_D1:
4706                                 for (k = 0; k < table->num_entries; k++) {
4707                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4708                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4709                                                 table->mc_reg_table_entry[k].mc_data[i] =
4710                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4711                                                         0x0000D0DD;
4712                                 }
4713                                 break;
4714                         case mmMC_SEQ_WR_CTL_2:
4715                                 for (k = 0; k < table->num_entries; k++) {
4716                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4717                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4718                                                 table->mc_reg_table_entry[k].mc_data[i] = 0;
4719                                 }
4720                                 break;
4721                         case mmMC_SEQ_CAS_TIMING:
4722                                 for (k = 0; k < table->num_entries; k++) {
4723                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4724                                                 table->mc_reg_table_entry[k].mc_data[i] =
4725                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4726                                                         0x000C0140;
4727                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4728                                                 table->mc_reg_table_entry[k].mc_data[i] =
4729                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4730                                                         0x000C0150;
4731                                 }
4732                                 break;
4733                         case mmMC_SEQ_MISC_TIMING:
4734                                 for (k = 0; k < table->num_entries; k++) {
4735                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4736                                                 table->mc_reg_table_entry[k].mc_data[i] =
4737                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4738                                                         0x00000030;
4739                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4740                                                 table->mc_reg_table_entry[k].mc_data[i] =
4741                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4742                                                         0x00000035;
4743                                 }
4744                                 break;
4745                         default:
4746                                 break;
4747                         }
4748                 }
4749
4750                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4751                 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4752                 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4753                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4754                 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4755         }
4756
4757         return 0;
4758 }
4759
4760 static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4761 {
4762         struct ci_power_info *pi = ci_get_pi(adev);
4763         struct atom_mc_reg_table *table;
4764         struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4765         u8 module_index = ci_get_memory_module_index(adev);
4766         int ret;
4767
4768         table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4769         if (!table)
4770                 return -ENOMEM;
4771
4772         WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4773         WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4774         WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4775         WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4776         WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4777         WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4778         WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4779         WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4780         WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4781         WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4782         WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4783         WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4784         WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4785         WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4786         WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4787         WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4788         WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4789         WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4790         WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4791         WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4792
4793         ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4794         if (ret)
4795                 goto init_mc_done;
4796
4797         ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4798         if (ret)
4799                 goto init_mc_done;
4800
4801         ci_set_s0_mc_reg_index(ci_table);
4802
4803         ret = ci_register_patching_mc_seq(adev, ci_table);
4804         if (ret)
4805                 goto init_mc_done;
4806
4807         ret = ci_set_mc_special_registers(adev, ci_table);
4808         if (ret)
4809                 goto init_mc_done;
4810
4811         ci_set_valid_flag(ci_table);
4812
4813 init_mc_done:
4814         kfree(table);
4815
4816         return ret;
4817 }
4818
4819 static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4820                                         SMU7_Discrete_MCRegisters *mc_reg_table)
4821 {
4822         struct ci_power_info *pi = ci_get_pi(adev);
4823         u32 i, j;
4824
4825         for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4826                 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4827                         if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4828                                 return -EINVAL;
4829                         mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4830                         mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4831                         i++;
4832                 }
4833         }
4834
4835         mc_reg_table->last = (u8)i;
4836
4837         return 0;
4838 }
4839
4840 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4841                                     SMU7_Discrete_MCRegisterSet *data,
4842                                     u32 num_entries, u32 valid_flag)
4843 {
4844         u32 i, j;
4845
4846         for (i = 0, j = 0; j < num_entries; j++) {
4847                 if (valid_flag & (1 << j)) {
4848                         data->value[i] = cpu_to_be32(entry->mc_data[j]);
4849                         i++;
4850                 }
4851         }
4852 }
4853
4854 static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4855                                                  const u32 memory_clock,
4856                                                  SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4857 {
4858         struct ci_power_info *pi = ci_get_pi(adev);
4859         u32 i = 0;
4860
4861         for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4862                 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4863                         break;
4864         }
4865
4866         if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4867                 --i;
4868
4869         ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4870                                 mc_reg_table_data, pi->mc_reg_table.last,
4871                                 pi->mc_reg_table.valid_flag);
4872 }
4873
4874 static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4875                                            SMU7_Discrete_MCRegisters *mc_reg_table)
4876 {
4877         struct ci_power_info *pi = ci_get_pi(adev);
4878         u32 i;
4879
4880         for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4881                 ci_convert_mc_reg_table_entry_to_smc(adev,
4882                                                      pi->dpm_table.mclk_table.dpm_levels[i].value,
4883                                                      &mc_reg_table->data[i]);
4884 }
4885
4886 static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4887 {
4888         struct ci_power_info *pi = ci_get_pi(adev);
4889         int ret;
4890
4891         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4892
4893         ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4894         if (ret)
4895                 return ret;
4896         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4897
4898         return amdgpu_ci_copy_bytes_to_smc(adev,
4899                                     pi->mc_reg_table_start,
4900                                     (u8 *)&pi->smc_mc_reg_table,
4901                                     sizeof(SMU7_Discrete_MCRegisters),
4902                                     pi->sram_end);
4903 }
4904
4905 static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4906 {
4907         struct ci_power_info *pi = ci_get_pi(adev);
4908
4909         if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4910                 return 0;
4911
4912         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4913
4914         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4915
4916         return amdgpu_ci_copy_bytes_to_smc(adev,
4917                                     pi->mc_reg_table_start +
4918                                     offsetof(SMU7_Discrete_MCRegisters, data[0]),
4919                                     (u8 *)&pi->smc_mc_reg_table.data[0],
4920                                     sizeof(SMU7_Discrete_MCRegisterSet) *
4921                                     pi->dpm_table.mclk_table.count,
4922                                     pi->sram_end);
4923 }
4924
4925 static void ci_enable_voltage_control(struct amdgpu_device *adev)
4926 {
4927         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4928
4929         tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4930         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4931 }
4932
4933 static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4934                                                       struct amdgpu_ps *amdgpu_state)
4935 {
4936         struct ci_ps *state = ci_get_ps(amdgpu_state);
4937         int i;
4938         u16 pcie_speed, max_speed = 0;
4939
4940         for (i = 0; i < state->performance_level_count; i++) {
4941                 pcie_speed = state->performance_levels[i].pcie_gen;
4942                 if (max_speed < pcie_speed)
4943                         max_speed = pcie_speed;
4944         }
4945
4946         return max_speed;
4947 }
4948
4949 static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4950 {
4951         u32 speed_cntl = 0;
4952
4953         speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4954                 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4955         speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4956
4957         return (u16)speed_cntl;
4958 }
4959
4960 static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4961 {
4962         u32 link_width = 0;
4963
4964         link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
4965                 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
4966         link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
4967
4968         switch (link_width) {
4969         case 1:
4970                 return 1;
4971         case 2:
4972                 return 2;
4973         case 3:
4974                 return 4;
4975         case 4:
4976                 return 8;
4977         case 0:
4978         case 6:
4979         default:
4980                 return 16;
4981         }
4982 }
4983
4984 static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
4985                                                              struct amdgpu_ps *amdgpu_new_state,
4986                                                              struct amdgpu_ps *amdgpu_current_state)
4987 {
4988         struct ci_power_info *pi = ci_get_pi(adev);
4989         enum amdgpu_pcie_gen target_link_speed =
4990                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
4991         enum amdgpu_pcie_gen current_link_speed;
4992
4993         if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
4994                 current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
4995         else
4996                 current_link_speed = pi->force_pcie_gen;
4997
4998         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
4999         pi->pspp_notify_required = false;
5000         if (target_link_speed > current_link_speed) {
5001                 switch (target_link_speed) {
5002 #ifdef CONFIG_ACPI
5003                 case AMDGPU_PCIE_GEN3:
5004                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
5005                                 break;
5006                         pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
5007                         if (current_link_speed == AMDGPU_PCIE_GEN2)
5008                                 break;
5009                 case AMDGPU_PCIE_GEN2:
5010                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
5011                                 break;
5012 #endif
5013                 default:
5014                         pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5015                         break;
5016                 }
5017         } else {
5018                 if (target_link_speed < current_link_speed)
5019                         pi->pspp_notify_required = true;
5020         }
5021 }
5022
5023 static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5024                                                            struct amdgpu_ps *amdgpu_new_state,
5025                                                            struct amdgpu_ps *amdgpu_current_state)
5026 {
5027         struct ci_power_info *pi = ci_get_pi(adev);
5028         enum amdgpu_pcie_gen target_link_speed =
5029                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5030         u8 request;
5031
5032         if (pi->pspp_notify_required) {
5033                 if (target_link_speed == AMDGPU_PCIE_GEN3)
5034                         request = PCIE_PERF_REQ_PECI_GEN3;
5035                 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5036                         request = PCIE_PERF_REQ_PECI_GEN2;
5037                 else
5038                         request = PCIE_PERF_REQ_PECI_GEN1;
5039
5040                 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5041                     (ci_get_current_pcie_speed(adev) > 0))
5042                         return;
5043
5044 #ifdef CONFIG_ACPI
5045                 amdgpu_acpi_pcie_performance_request(adev, request, false);
5046 #endif
5047         }
5048 }
5049
5050 static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5051 {
5052         struct ci_power_info *pi = ci_get_pi(adev);
5053         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5054                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5055         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5056                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5057         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5058                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5059
5060         if (allowed_sclk_vddc_table == NULL)
5061                 return -EINVAL;
5062         if (allowed_sclk_vddc_table->count < 1)
5063                 return -EINVAL;
5064         if (allowed_mclk_vddc_table == NULL)
5065                 return -EINVAL;
5066         if (allowed_mclk_vddc_table->count < 1)
5067                 return -EINVAL;
5068         if (allowed_mclk_vddci_table == NULL)
5069                 return -EINVAL;
5070         if (allowed_mclk_vddci_table->count < 1)
5071                 return -EINVAL;
5072
5073         pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5074         pi->max_vddc_in_pp_table =
5075                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5076
5077         pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5078         pi->max_vddci_in_pp_table =
5079                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5080
5081         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5082                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5083         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5084                 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5085         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5086                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5087         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5088                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5089
5090         return 0;
5091 }
5092
5093 static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5094 {
5095         struct ci_power_info *pi = ci_get_pi(adev);
5096         struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5097         u32 leakage_index;
5098
5099         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5100                 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5101                         *vddc = leakage_table->actual_voltage[leakage_index];
5102                         break;
5103                 }
5104         }
5105 }
5106
5107 static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5108 {
5109         struct ci_power_info *pi = ci_get_pi(adev);
5110         struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5111         u32 leakage_index;
5112
5113         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5114                 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5115                         *vddci = leakage_table->actual_voltage[leakage_index];
5116                         break;
5117                 }
5118         }
5119 }
5120
5121 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5122                                                                       struct amdgpu_clock_voltage_dependency_table *table)
5123 {
5124         u32 i;
5125
5126         if (table) {
5127                 for (i = 0; i < table->count; i++)
5128                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5129         }
5130 }
5131
5132 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5133                                                                        struct amdgpu_clock_voltage_dependency_table *table)
5134 {
5135         u32 i;
5136
5137         if (table) {
5138                 for (i = 0; i < table->count; i++)
5139                         ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5140         }
5141 }
5142
5143 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5144                                                                           struct amdgpu_vce_clock_voltage_dependency_table *table)
5145 {
5146         u32 i;
5147
5148         if (table) {
5149                 for (i = 0; i < table->count; i++)
5150                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5151         }
5152 }
5153
5154 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5155                                                                           struct amdgpu_uvd_clock_voltage_dependency_table *table)
5156 {
5157         u32 i;
5158
5159         if (table) {
5160                 for (i = 0; i < table->count; i++)
5161                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5162         }
5163 }
5164
5165 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5166                                                                    struct amdgpu_phase_shedding_limits_table *table)
5167 {
5168         u32 i;
5169
5170         if (table) {
5171                 for (i = 0; i < table->count; i++)
5172                         ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5173         }
5174 }
5175
5176 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5177                                                             struct amdgpu_clock_and_voltage_limits *table)
5178 {
5179         if (table) {
5180                 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5181                 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5182         }
5183 }
5184
5185 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5186                                                          struct amdgpu_cac_leakage_table *table)
5187 {
5188         u32 i;
5189
5190         if (table) {
5191                 for (i = 0; i < table->count; i++)
5192                         ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5193         }
5194 }
5195
5196 static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5197 {
5198
5199         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5200                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5201         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5202                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5203         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5204                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5205         ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5206                                                                    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5207         ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5208                                                                       &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5209         ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5210                                                                       &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5211         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5212                                                                   &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5213         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5214                                                                   &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5215         ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5216                                                                &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5217         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5218                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5219         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5220                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5221         ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5222                                                      &adev->pm.dpm.dyn_state.cac_leakage_table);
5223
5224 }
5225
5226 static void ci_update_current_ps(struct amdgpu_device *adev,
5227                                  struct amdgpu_ps *rps)
5228 {
5229         struct ci_ps *new_ps = ci_get_ps(rps);
5230         struct ci_power_info *pi = ci_get_pi(adev);
5231
5232         pi->current_rps = *rps;
5233         pi->current_ps = *new_ps;
5234         pi->current_rps.ps_priv = &pi->current_ps;
5235 }
5236
5237 static void ci_update_requested_ps(struct amdgpu_device *adev,
5238                                    struct amdgpu_ps *rps)
5239 {
5240         struct ci_ps *new_ps = ci_get_ps(rps);
5241         struct ci_power_info *pi = ci_get_pi(adev);
5242
5243         pi->requested_rps = *rps;
5244         pi->requested_ps = *new_ps;
5245         pi->requested_rps.ps_priv = &pi->requested_ps;
5246 }
5247
5248 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
5249 {
5250         struct ci_power_info *pi = ci_get_pi(adev);
5251         struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5252         struct amdgpu_ps *new_ps = &requested_ps;
5253
5254         ci_update_requested_ps(adev, new_ps);
5255
5256         ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5257
5258         return 0;
5259 }
5260
5261 static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
5262 {
5263         struct ci_power_info *pi = ci_get_pi(adev);
5264         struct amdgpu_ps *new_ps = &pi->requested_rps;
5265
5266         ci_update_current_ps(adev, new_ps);
5267 }
5268
5269
5270 static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5271 {
5272         ci_read_clock_registers(adev);
5273         ci_enable_acpi_power_management(adev);
5274         ci_init_sclk_t(adev);
5275 }
5276
5277 static int ci_dpm_enable(struct amdgpu_device *adev)
5278 {
5279         struct ci_power_info *pi = ci_get_pi(adev);
5280         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5281         int ret;
5282
5283         if (amdgpu_ci_is_smc_running(adev))
5284                 return -EINVAL;
5285         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5286                 ci_enable_voltage_control(adev);
5287                 ret = ci_construct_voltage_tables(adev);
5288                 if (ret) {
5289                         DRM_ERROR("ci_construct_voltage_tables failed\n");
5290                         return ret;
5291                 }
5292         }
5293         if (pi->caps_dynamic_ac_timing) {
5294                 ret = ci_initialize_mc_reg_table(adev);
5295                 if (ret)
5296                         pi->caps_dynamic_ac_timing = false;
5297         }
5298         if (pi->dynamic_ss)
5299                 ci_enable_spread_spectrum(adev, true);
5300         if (pi->thermal_protection)
5301                 ci_enable_thermal_protection(adev, true);
5302         ci_program_sstp(adev);
5303         ci_enable_display_gap(adev);
5304         ci_program_vc(adev);
5305         ret = ci_upload_firmware(adev);
5306         if (ret) {
5307                 DRM_ERROR("ci_upload_firmware failed\n");
5308                 return ret;
5309         }
5310         ret = ci_process_firmware_header(adev);
5311         if (ret) {
5312                 DRM_ERROR("ci_process_firmware_header failed\n");
5313                 return ret;
5314         }
5315         ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5316         if (ret) {
5317                 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5318                 return ret;
5319         }
5320         ret = ci_init_smc_table(adev);
5321         if (ret) {
5322                 DRM_ERROR("ci_init_smc_table failed\n");
5323                 return ret;
5324         }
5325         ret = ci_init_arb_table_index(adev);
5326         if (ret) {
5327                 DRM_ERROR("ci_init_arb_table_index failed\n");
5328                 return ret;
5329         }
5330         if (pi->caps_dynamic_ac_timing) {
5331                 ret = ci_populate_initial_mc_reg_table(adev);
5332                 if (ret) {
5333                         DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5334                         return ret;
5335                 }
5336         }
5337         ret = ci_populate_pm_base(adev);
5338         if (ret) {
5339                 DRM_ERROR("ci_populate_pm_base failed\n");
5340                 return ret;
5341         }
5342         ci_dpm_start_smc(adev);
5343         ci_enable_vr_hot_gpio_interrupt(adev);
5344         ret = ci_notify_smc_display_change(adev, false);
5345         if (ret) {
5346                 DRM_ERROR("ci_notify_smc_display_change failed\n");
5347                 return ret;
5348         }
5349         ci_enable_sclk_control(adev, true);
5350         ret = ci_enable_ulv(adev, true);
5351         if (ret) {
5352                 DRM_ERROR("ci_enable_ulv failed\n");
5353                 return ret;
5354         }
5355         ret = ci_enable_ds_master_switch(adev, true);
5356         if (ret) {
5357                 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5358                 return ret;
5359         }
5360         ret = ci_start_dpm(adev);
5361         if (ret) {
5362                 DRM_ERROR("ci_start_dpm failed\n");
5363                 return ret;
5364         }
5365         ret = ci_enable_didt(adev, true);
5366         if (ret) {
5367                 DRM_ERROR("ci_enable_didt failed\n");
5368                 return ret;
5369         }
5370         ret = ci_enable_smc_cac(adev, true);
5371         if (ret) {
5372                 DRM_ERROR("ci_enable_smc_cac failed\n");
5373                 return ret;
5374         }
5375         ret = ci_enable_power_containment(adev, true);
5376         if (ret) {
5377                 DRM_ERROR("ci_enable_power_containment failed\n");
5378                 return ret;
5379         }
5380
5381         ret = ci_power_control_set_level(adev);
5382         if (ret) {
5383                 DRM_ERROR("ci_power_control_set_level failed\n");
5384                 return ret;
5385         }
5386
5387         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5388
5389         ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5390         if (ret) {
5391                 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5392                 return ret;
5393         }
5394
5395         ci_thermal_start_thermal_controller(adev);
5396
5397         ci_update_current_ps(adev, boot_ps);
5398
5399         if (adev->irq.installed &&
5400             amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
5401 #if 0
5402                 PPSMC_Result result;
5403 #endif
5404                 ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
5405                                                        CISLANDS_TEMP_RANGE_MAX);
5406                 if (ret) {
5407                         DRM_ERROR("ci_thermal_set_temperature_range failed\n");
5408                         return ret;
5409                 }
5410                 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
5411                                AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5412                 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
5413                                AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5414
5415 #if 0
5416                 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
5417
5418                 if (result != PPSMC_Result_OK)
5419                         DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
5420 #endif
5421         }
5422
5423         return 0;
5424 }
5425
5426 static void ci_dpm_disable(struct amdgpu_device *adev)
5427 {
5428         struct ci_power_info *pi = ci_get_pi(adev);
5429         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5430
5431         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5432                        AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5433         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5434                        AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5435
5436         ci_dpm_powergate_uvd(adev, false);
5437
5438         if (!amdgpu_ci_is_smc_running(adev))
5439                 return;
5440
5441         ci_thermal_stop_thermal_controller(adev);
5442
5443         if (pi->thermal_protection)
5444                 ci_enable_thermal_protection(adev, false);
5445         ci_enable_power_containment(adev, false);
5446         ci_enable_smc_cac(adev, false);
5447         ci_enable_didt(adev, false);
5448         ci_enable_spread_spectrum(adev, false);
5449         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5450         ci_stop_dpm(adev);
5451         ci_enable_ds_master_switch(adev, false);
5452         ci_enable_ulv(adev, false);
5453         ci_clear_vc(adev);
5454         ci_reset_to_default(adev);
5455         ci_dpm_stop_smc(adev);
5456         ci_force_switch_to_arb_f0(adev);
5457         ci_enable_thermal_based_sclk_dpm(adev, false);
5458
5459         ci_update_current_ps(adev, boot_ps);
5460 }
5461
5462 static int ci_dpm_set_power_state(struct amdgpu_device *adev)
5463 {
5464         struct ci_power_info *pi = ci_get_pi(adev);
5465         struct amdgpu_ps *new_ps = &pi->requested_rps;
5466         struct amdgpu_ps *old_ps = &pi->current_rps;
5467         int ret;
5468
5469         ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5470         if (pi->pcie_performance_request)
5471                 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5472         ret = ci_freeze_sclk_mclk_dpm(adev);
5473         if (ret) {
5474                 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5475                 return ret;
5476         }
5477         ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5478         if (ret) {
5479                 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5480                 return ret;
5481         }
5482         ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5483         if (ret) {
5484                 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5485                 return ret;
5486         }
5487
5488         ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5489         if (ret) {
5490                 DRM_ERROR("ci_update_vce_dpm failed\n");
5491                 return ret;
5492         }
5493
5494         ret = ci_update_sclk_t(adev);
5495         if (ret) {
5496                 DRM_ERROR("ci_update_sclk_t failed\n");
5497                 return ret;
5498         }
5499         if (pi->caps_dynamic_ac_timing) {
5500                 ret = ci_update_and_upload_mc_reg_table(adev);
5501                 if (ret) {
5502                         DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5503                         return ret;
5504                 }
5505         }
5506         ret = ci_program_memory_timing_parameters(adev);
5507         if (ret) {
5508                 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5509                 return ret;
5510         }
5511         ret = ci_unfreeze_sclk_mclk_dpm(adev);
5512         if (ret) {
5513                 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5514                 return ret;
5515         }
5516         ret = ci_upload_dpm_level_enable_mask(adev);
5517         if (ret) {
5518                 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5519                 return ret;
5520         }
5521         if (pi->pcie_performance_request)
5522                 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5523
5524         return 0;
5525 }
5526
5527 #if 0
5528 static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5529 {
5530         ci_set_boot_state(adev);
5531 }
5532 #endif
5533
5534 static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
5535 {
5536         ci_program_display_gap(adev);
5537 }
5538
5539 union power_info {
5540         struct _ATOM_POWERPLAY_INFO info;
5541         struct _ATOM_POWERPLAY_INFO_V2 info_2;
5542         struct _ATOM_POWERPLAY_INFO_V3 info_3;
5543         struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5544         struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5545         struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5546 };
5547
5548 union pplib_clock_info {
5549         struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5550         struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5551         struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5552         struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5553         struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5554         struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5555 };
5556
5557 union pplib_power_state {
5558         struct _ATOM_PPLIB_STATE v1;
5559         struct _ATOM_PPLIB_STATE_V2 v2;
5560 };
5561
5562 static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5563                                           struct amdgpu_ps *rps,
5564                                           struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5565                                           u8 table_rev)
5566 {
5567         rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5568         rps->class = le16_to_cpu(non_clock_info->usClassification);
5569         rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5570
5571         if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5572                 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5573                 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5574         } else {
5575                 rps->vclk = 0;
5576                 rps->dclk = 0;
5577         }
5578
5579         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5580                 adev->pm.dpm.boot_ps = rps;
5581         if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5582                 adev->pm.dpm.uvd_ps = rps;
5583 }
5584
5585 static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5586                                       struct amdgpu_ps *rps, int index,
5587                                       union pplib_clock_info *clock_info)
5588 {
5589         struct ci_power_info *pi = ci_get_pi(adev);
5590         struct ci_ps *ps = ci_get_ps(rps);
5591         struct ci_pl *pl = &ps->performance_levels[index];
5592
5593         ps->performance_level_count = index + 1;
5594
5595         pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5596         pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5597         pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5598         pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5599
5600         pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5601                                                    pi->sys_pcie_mask,
5602                                                    pi->vbios_boot_state.pcie_gen_bootup_value,
5603                                                    clock_info->ci.ucPCIEGen);
5604         pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5605                                                      pi->vbios_boot_state.pcie_lane_bootup_value,
5606                                                      le16_to_cpu(clock_info->ci.usPCIELane));
5607
5608         if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5609                 pi->acpi_pcie_gen = pl->pcie_gen;
5610         }
5611
5612         if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5613                 pi->ulv.supported = true;
5614                 pi->ulv.pl = *pl;
5615                 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5616         }
5617
5618         /* patch up boot state */
5619         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5620                 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5621                 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5622                 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5623                 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5624         }
5625
5626         switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5627         case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5628                 pi->use_pcie_powersaving_levels = true;
5629                 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5630                         pi->pcie_gen_powersaving.max = pl->pcie_gen;
5631                 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5632                         pi->pcie_gen_powersaving.min = pl->pcie_gen;
5633                 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5634                         pi->pcie_lane_powersaving.max = pl->pcie_lane;
5635                 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5636                         pi->pcie_lane_powersaving.min = pl->pcie_lane;
5637                 break;
5638         case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5639                 pi->use_pcie_performance_levels = true;
5640                 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5641                         pi->pcie_gen_performance.max = pl->pcie_gen;
5642                 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5643                         pi->pcie_gen_performance.min = pl->pcie_gen;
5644                 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5645                         pi->pcie_lane_performance.max = pl->pcie_lane;
5646                 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5647                         pi->pcie_lane_performance.min = pl->pcie_lane;
5648                 break;
5649         default:
5650                 break;
5651         }
5652 }
5653
5654 static int ci_parse_power_table(struct amdgpu_device *adev)
5655 {
5656         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5657         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5658         union pplib_power_state *power_state;
5659         int i, j, k, non_clock_array_index, clock_array_index;
5660         union pplib_clock_info *clock_info;
5661         struct _StateArray *state_array;
5662         struct _ClockInfoArray *clock_info_array;
5663         struct _NonClockInfoArray *non_clock_info_array;
5664         union power_info *power_info;
5665         int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5666         u16 data_offset;
5667         u8 frev, crev;
5668         u8 *power_state_offset;
5669         struct ci_ps *ps;
5670
5671         if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5672                                    &frev, &crev, &data_offset))
5673                 return -EINVAL;
5674         power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5675
5676         amdgpu_add_thermal_controller(adev);
5677
5678         state_array = (struct _StateArray *)
5679                 (mode_info->atom_context->bios + data_offset +
5680                  le16_to_cpu(power_info->pplib.usStateArrayOffset));
5681         clock_info_array = (struct _ClockInfoArray *)
5682                 (mode_info->atom_context->bios + data_offset +
5683                  le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5684         non_clock_info_array = (struct _NonClockInfoArray *)
5685                 (mode_info->atom_context->bios + data_offset +
5686                  le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5687
5688         adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
5689                                   state_array->ucNumEntries, GFP_KERNEL);
5690         if (!adev->pm.dpm.ps)
5691                 return -ENOMEM;
5692         power_state_offset = (u8 *)state_array->states;
5693         for (i = 0; i < state_array->ucNumEntries; i++) {
5694                 u8 *idx;
5695                 power_state = (union pplib_power_state *)power_state_offset;
5696                 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5697                 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5698                         &non_clock_info_array->nonClockInfo[non_clock_array_index];
5699                 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5700                 if (ps == NULL) {
5701                         kfree(adev->pm.dpm.ps);
5702                         return -ENOMEM;
5703                 }
5704                 adev->pm.dpm.ps[i].ps_priv = ps;
5705                 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5706                                               non_clock_info,
5707                                               non_clock_info_array->ucEntrySize);
5708                 k = 0;
5709                 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5710                 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5711                         clock_array_index = idx[j];
5712                         if (clock_array_index >= clock_info_array->ucNumEntries)
5713                                 continue;
5714                         if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5715                                 break;
5716                         clock_info = (union pplib_clock_info *)
5717                                 ((u8 *)&clock_info_array->clockInfo[0] +
5718                                  (clock_array_index * clock_info_array->ucEntrySize));
5719                         ci_parse_pplib_clock_info(adev,
5720                                                   &adev->pm.dpm.ps[i], k,
5721                                                   clock_info);
5722                         k++;
5723                 }
5724                 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5725         }
5726         adev->pm.dpm.num_ps = state_array->ucNumEntries;
5727
5728         /* fill in the vce power states */
5729         for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
5730                 u32 sclk, mclk;
5731                 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5732                 clock_info = (union pplib_clock_info *)
5733                         &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5734                 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5735                 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5736                 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5737                 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5738                 adev->pm.dpm.vce_states[i].sclk = sclk;
5739                 adev->pm.dpm.vce_states[i].mclk = mclk;
5740         }
5741
5742         return 0;
5743 }
5744
5745 static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5746                                     struct ci_vbios_boot_state *boot_state)
5747 {
5748         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5749         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5750         ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5751         u8 frev, crev;
5752         u16 data_offset;
5753
5754         if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5755                                    &frev, &crev, &data_offset)) {
5756                 firmware_info =
5757                         (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5758                                                     data_offset);
5759                 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5760                 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5761                 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5762                 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5763                 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5764                 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5765                 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5766
5767                 return 0;
5768         }
5769         return -EINVAL;
5770 }
5771
5772 static void ci_dpm_fini(struct amdgpu_device *adev)
5773 {
5774         int i;
5775
5776         for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5777                 kfree(adev->pm.dpm.ps[i].ps_priv);
5778         }
5779         kfree(adev->pm.dpm.ps);
5780         kfree(adev->pm.dpm.priv);
5781         kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5782         amdgpu_free_extended_power_table(adev);
5783 }
5784
5785 /**
5786  * ci_dpm_init_microcode - load ucode images from disk
5787  *
5788  * @adev: amdgpu_device pointer
5789  *
5790  * Use the firmware interface to load the ucode images into
5791  * the driver (not loaded into hw).
5792  * Returns 0 on success, error on failure.
5793  */
5794 static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5795 {
5796         const char *chip_name;
5797         char fw_name[30];
5798         int err;
5799
5800         DRM_DEBUG("\n");
5801
5802         switch (adev->asic_type) {
5803         case CHIP_BONAIRE:
5804                 chip_name = "bonaire";
5805                 break;
5806         case CHIP_HAWAII:
5807                 chip_name = "hawaii";
5808                 break;
5809         case CHIP_KAVERI:
5810         case CHIP_KABINI:
5811         default: BUG();
5812         }
5813
5814         snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5815         err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5816         if (err)
5817                 goto out;
5818         err = amdgpu_ucode_validate(adev->pm.fw);
5819
5820 out:
5821         if (err) {
5822                 printk(KERN_ERR
5823                        "cik_smc: Failed to load firmware \"%s\"\n",
5824                        fw_name);
5825                 release_firmware(adev->pm.fw);
5826                 adev->pm.fw = NULL;
5827         }
5828         return err;
5829 }
5830
5831 static int ci_dpm_init(struct amdgpu_device *adev)
5832 {
5833         int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5834         SMU7_Discrete_DpmTable *dpm_table;
5835         struct amdgpu_gpio_rec gpio;
5836         u16 data_offset, size;
5837         u8 frev, crev;
5838         struct ci_power_info *pi;
5839         int ret;
5840         u32 mask;
5841
5842         pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5843         if (pi == NULL)
5844                 return -ENOMEM;
5845         adev->pm.dpm.priv = pi;
5846
5847         ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
5848         if (ret)
5849                 pi->sys_pcie_mask = 0;
5850         else
5851                 pi->sys_pcie_mask = mask;
5852         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5853
5854         pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5855         pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5856         pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5857         pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5858
5859         pi->pcie_lane_performance.max = 0;
5860         pi->pcie_lane_performance.min = 16;
5861         pi->pcie_lane_powersaving.max = 0;
5862         pi->pcie_lane_powersaving.min = 16;
5863
5864         ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5865         if (ret) {
5866                 ci_dpm_fini(adev);
5867                 return ret;
5868         }
5869
5870         ret = amdgpu_get_platform_caps(adev);
5871         if (ret) {
5872                 ci_dpm_fini(adev);
5873                 return ret;
5874         }
5875
5876         ret = amdgpu_parse_extended_power_table(adev);
5877         if (ret) {
5878                 ci_dpm_fini(adev);
5879                 return ret;
5880         }
5881
5882         ret = ci_parse_power_table(adev);
5883         if (ret) {
5884                 ci_dpm_fini(adev);
5885                 return ret;
5886         }
5887
5888         pi->dll_default_on = false;
5889         pi->sram_end = SMC_RAM_END;
5890
5891         pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5892         pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5893         pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5894         pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5895         pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5896         pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5897         pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5898         pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5899
5900         pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5901
5902         pi->sclk_dpm_key_disabled = 0;
5903         pi->mclk_dpm_key_disabled = 0;
5904         pi->pcie_dpm_key_disabled = 0;
5905         pi->thermal_sclk_dpm_enabled = 0;
5906
5907         pi->caps_sclk_ds = true;
5908
5909         pi->mclk_strobe_mode_threshold = 40000;
5910         pi->mclk_stutter_mode_threshold = 40000;
5911         pi->mclk_edc_enable_threshold = 40000;
5912         pi->mclk_edc_wr_enable_threshold = 40000;
5913
5914         ci_initialize_powertune_defaults(adev);
5915
5916         pi->caps_fps = false;
5917
5918         pi->caps_sclk_throttle_low_notification = false;
5919
5920         pi->caps_uvd_dpm = true;
5921         pi->caps_vce_dpm = true;
5922
5923         ci_get_leakage_voltages(adev);
5924         ci_patch_dependency_tables_with_leakage(adev);
5925         ci_set_private_data_variables_based_on_pptable(adev);
5926
5927         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5928                 kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
5929         if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5930                 ci_dpm_fini(adev);
5931                 return -ENOMEM;
5932         }
5933         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5934         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5935         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5936         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5937         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5938         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5939         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5940         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5941         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5942
5943         adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5944         adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5945         adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5946
5947         adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5948         adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5949         adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5950         adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5951
5952         if (adev->asic_type == CHIP_HAWAII) {
5953                 pi->thermal_temp_setting.temperature_low = 94500;
5954                 pi->thermal_temp_setting.temperature_high = 95000;
5955                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5956         } else {
5957                 pi->thermal_temp_setting.temperature_low = 99500;
5958                 pi->thermal_temp_setting.temperature_high = 100000;
5959                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5960         }
5961
5962         pi->uvd_enabled = false;
5963
5964         dpm_table = &pi->smc_state_table;
5965
5966         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5967         if (gpio.valid) {
5968                 dpm_table->VRHotGpio = gpio.shift;
5969                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5970         } else {
5971                 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5972                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5973         }
5974
5975         gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5976         if (gpio.valid) {
5977                 dpm_table->AcDcGpio = gpio.shift;
5978                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5979         } else {
5980                 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5981                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5982         }
5983
5984         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
5985         if (gpio.valid) {
5986                 u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
5987
5988                 switch (gpio.shift) {
5989                 case 0:
5990                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5991                         tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5992                         break;
5993                 case 1:
5994                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5995                         tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5996                         break;
5997                 case 2:
5998                         tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
5999                         break;
6000                 case 3:
6001                         tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
6002                         break;
6003                 case 4:
6004                         tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
6005                         break;
6006                 default:
6007                         DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
6008                         break;
6009                 }
6010                 WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
6011         }
6012
6013         pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6014         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6015         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6016         if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
6017                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6018         else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
6019                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6020
6021         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
6022                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
6023                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6024                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
6025                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6026                 else
6027                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
6028         }
6029
6030         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
6031                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
6032                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6033                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6034                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6035                 else
6036                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6037         }
6038
6039         pi->vddc_phase_shed_control = true;
6040
6041 #if defined(CONFIG_ACPI)
6042         pi->pcie_performance_request =
6043                 amdgpu_acpi_is_pcie_performance_request_supported(adev);
6044 #else
6045         pi->pcie_performance_request = false;
6046 #endif
6047
6048         if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6049                                    &frev, &crev, &data_offset)) {
6050                 pi->caps_sclk_ss_support = true;
6051                 pi->caps_mclk_ss_support = true;
6052                 pi->dynamic_ss = true;
6053         } else {
6054                 pi->caps_sclk_ss_support = false;
6055                 pi->caps_mclk_ss_support = false;
6056                 pi->dynamic_ss = true;
6057         }
6058
6059         if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6060                 pi->thermal_protection = true;
6061         else
6062                 pi->thermal_protection = false;
6063
6064         pi->caps_dynamic_ac_timing = true;
6065
6066         pi->uvd_power_gated = false;
6067
6068         /* make sure dc limits are valid */
6069         if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6070             (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6071                 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6072                         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6073
6074         pi->fan_ctrl_is_in_default_mode = true;
6075
6076         return 0;
6077 }
6078
6079 static void
6080 ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
6081                                                struct seq_file *m)
6082 {
6083         struct ci_power_info *pi = ci_get_pi(adev);
6084         struct amdgpu_ps *rps = &pi->current_rps;
6085         u32 sclk = ci_get_average_sclk_freq(adev);
6086         u32 mclk = ci_get_average_mclk_freq(adev);
6087
6088         seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
6089         seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6090         seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
6091                    sclk, mclk);
6092 }
6093
6094 static void ci_dpm_print_power_state(struct amdgpu_device *adev,
6095                                      struct amdgpu_ps *rps)
6096 {
6097         struct ci_ps *ps = ci_get_ps(rps);
6098         struct ci_pl *pl;
6099         int i;
6100
6101         amdgpu_dpm_print_class_info(rps->class, rps->class2);
6102         amdgpu_dpm_print_cap_info(rps->caps);
6103         printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6104         for (i = 0; i < ps->performance_level_count; i++) {
6105                 pl = &ps->performance_levels[i];
6106                 printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6107                        i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6108         }
6109         amdgpu_dpm_print_ps_status(adev, rps);
6110 }
6111
6112 static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
6113 {
6114         struct ci_power_info *pi = ci_get_pi(adev);
6115         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6116
6117         if (low)
6118                 return requested_state->performance_levels[0].sclk;
6119         else
6120                 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6121 }
6122
6123 static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
6124 {
6125         struct ci_power_info *pi = ci_get_pi(adev);
6126         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6127
6128         if (low)
6129                 return requested_state->performance_levels[0].mclk;
6130         else
6131                 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6132 }
6133
6134 /* get temperature in millidegrees */
6135 static int ci_dpm_get_temp(struct amdgpu_device *adev)
6136 {
6137         u32 temp;
6138         int actual_temp = 0;
6139
6140         temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6141                 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6142
6143         if (temp & 0x200)
6144                 actual_temp = 255;
6145         else
6146                 actual_temp = temp & 0x1ff;
6147
6148         actual_temp = actual_temp * 1000;
6149
6150         return actual_temp;
6151 }
6152
6153 static int ci_set_temperature_range(struct amdgpu_device *adev)
6154 {
6155         int ret;
6156
6157         ret = ci_thermal_enable_alert(adev, false);
6158         if (ret)
6159                 return ret;
6160         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6161                                                CISLANDS_TEMP_RANGE_MAX);
6162         if (ret)
6163                 return ret;
6164         ret = ci_thermal_enable_alert(adev, true);
6165         if (ret)
6166                 return ret;
6167         return ret;
6168 }
6169
6170 static int ci_dpm_early_init(void *handle)
6171 {
6172         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6173
6174         ci_dpm_set_dpm_funcs(adev);
6175         ci_dpm_set_irq_funcs(adev);
6176
6177         return 0;
6178 }
6179
6180 static int ci_dpm_late_init(void *handle)
6181 {
6182         int ret;
6183         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6184
6185         if (!amdgpu_dpm)
6186                 return 0;
6187
6188         /* init the sysfs and debugfs files late */
6189         ret = amdgpu_pm_sysfs_init(adev);
6190         if (ret)
6191                 return ret;
6192
6193         ret = ci_set_temperature_range(adev);
6194         if (ret)
6195                 return ret;
6196
6197         ci_dpm_powergate_uvd(adev, true);
6198
6199         return 0;
6200 }
6201
6202 static int ci_dpm_sw_init(void *handle)
6203 {
6204         int ret;
6205         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6206
6207         ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
6208         if (ret)
6209                 return ret;
6210
6211         ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
6212         if (ret)
6213                 return ret;
6214
6215         /* default to balanced state */
6216         adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
6217         adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
6218         adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
6219         adev->pm.default_sclk = adev->clock.default_sclk;
6220         adev->pm.default_mclk = adev->clock.default_mclk;
6221         adev->pm.current_sclk = adev->clock.default_sclk;
6222         adev->pm.current_mclk = adev->clock.default_mclk;
6223         adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
6224
6225         if (amdgpu_dpm == 0)
6226                 return 0;
6227
6228         ret = ci_dpm_init_microcode(adev);
6229         if (ret)
6230                 return ret;
6231
6232         INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
6233         mutex_lock(&adev->pm.mutex);
6234         ret = ci_dpm_init(adev);
6235         if (ret)
6236                 goto dpm_failed;
6237         adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6238         if (amdgpu_dpm == 1)
6239                 amdgpu_pm_print_power_states(adev);
6240         mutex_unlock(&adev->pm.mutex);
6241         DRM_INFO("amdgpu: dpm initialized\n");
6242
6243         return 0;
6244
6245 dpm_failed:
6246         ci_dpm_fini(adev);
6247         mutex_unlock(&adev->pm.mutex);
6248         DRM_ERROR("amdgpu: dpm initialization failed\n");
6249         return ret;
6250 }
6251
6252 static int ci_dpm_sw_fini(void *handle)
6253 {
6254         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6255
6256         mutex_lock(&adev->pm.mutex);
6257         amdgpu_pm_sysfs_fini(adev);
6258         ci_dpm_fini(adev);
6259         mutex_unlock(&adev->pm.mutex);
6260
6261         return 0;
6262 }
6263
6264 static int ci_dpm_hw_init(void *handle)
6265 {
6266         int ret;
6267
6268         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6269
6270         if (!amdgpu_dpm)
6271                 return 0;
6272
6273         mutex_lock(&adev->pm.mutex);
6274         ci_dpm_setup_asic(adev);
6275         ret = ci_dpm_enable(adev);
6276         if (ret)
6277                 adev->pm.dpm_enabled = false;
6278         else
6279                 adev->pm.dpm_enabled = true;
6280         mutex_unlock(&adev->pm.mutex);
6281
6282         return ret;
6283 }
6284
6285 static int ci_dpm_hw_fini(void *handle)
6286 {
6287         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6288
6289         if (adev->pm.dpm_enabled) {
6290                 mutex_lock(&adev->pm.mutex);
6291                 ci_dpm_disable(adev);
6292                 mutex_unlock(&adev->pm.mutex);
6293         }
6294
6295         return 0;
6296 }
6297
6298 static int ci_dpm_suspend(void *handle)
6299 {
6300         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6301
6302         if (adev->pm.dpm_enabled) {
6303                 mutex_lock(&adev->pm.mutex);
6304                 /* disable dpm */
6305                 ci_dpm_disable(adev);
6306                 /* reset the power state */
6307                 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6308                 mutex_unlock(&adev->pm.mutex);
6309         }
6310         return 0;
6311 }
6312
6313 static int ci_dpm_resume(void *handle)
6314 {
6315         int ret;
6316         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6317
6318         if (adev->pm.dpm_enabled) {
6319                 /* asic init will reset to the boot state */
6320                 mutex_lock(&adev->pm.mutex);
6321                 ci_dpm_setup_asic(adev);
6322                 ret = ci_dpm_enable(adev);
6323                 if (ret)
6324                         adev->pm.dpm_enabled = false;
6325                 else
6326                         adev->pm.dpm_enabled = true;
6327                 mutex_unlock(&adev->pm.mutex);
6328                 if (adev->pm.dpm_enabled)
6329                         amdgpu_pm_compute_clocks(adev);
6330         }
6331         return 0;
6332 }
6333
6334 static bool ci_dpm_is_idle(void *handle)
6335 {
6336         /* XXX */
6337         return true;
6338 }
6339
6340 static int ci_dpm_wait_for_idle(void *handle)
6341 {
6342         /* XXX */
6343         return 0;
6344 }
6345
6346 static void ci_dpm_print_status(void *handle)
6347 {
6348         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6349
6350         dev_info(adev->dev, "CIK DPM registers\n");
6351         dev_info(adev->dev, "  BIOS_SCRATCH_4=0x%08X\n",
6352                  RREG32(mmBIOS_SCRATCH_4));
6353         dev_info(adev->dev, "  MC_ARB_DRAM_TIMING=0x%08X\n",
6354                  RREG32(mmMC_ARB_DRAM_TIMING));
6355         dev_info(adev->dev, "  MC_ARB_DRAM_TIMING2=0x%08X\n",
6356                  RREG32(mmMC_ARB_DRAM_TIMING2));
6357         dev_info(adev->dev, "  MC_ARB_BURST_TIME=0x%08X\n",
6358                  RREG32(mmMC_ARB_BURST_TIME));
6359         dev_info(adev->dev, "  MC_ARB_DRAM_TIMING_1=0x%08X\n",
6360                  RREG32(mmMC_ARB_DRAM_TIMING_1));
6361         dev_info(adev->dev, "  MC_ARB_DRAM_TIMING2_1=0x%08X\n",
6362                  RREG32(mmMC_ARB_DRAM_TIMING2_1));
6363         dev_info(adev->dev, "  MC_CG_CONFIG=0x%08X\n",
6364                  RREG32(mmMC_CG_CONFIG));
6365         dev_info(adev->dev, "  MC_ARB_CG=0x%08X\n",
6366                  RREG32(mmMC_ARB_CG));
6367         dev_info(adev->dev, "  DIDT_SQ_CTRL0=0x%08X\n",
6368                  RREG32_DIDT(ixDIDT_SQ_CTRL0));
6369         dev_info(adev->dev, "  DIDT_DB_CTRL0=0x%08X\n",
6370                  RREG32_DIDT(ixDIDT_DB_CTRL0));
6371         dev_info(adev->dev, "  DIDT_TD_CTRL0=0x%08X\n",
6372                  RREG32_DIDT(ixDIDT_TD_CTRL0));
6373         dev_info(adev->dev, "  DIDT_TCP_CTRL0=0x%08X\n",
6374                  RREG32_DIDT(ixDIDT_TCP_CTRL0));
6375         dev_info(adev->dev, "  CG_THERMAL_INT=0x%08X\n",
6376                  RREG32_SMC(ixCG_THERMAL_INT));
6377         dev_info(adev->dev, "  CG_THERMAL_CTRL=0x%08X\n",
6378                  RREG32_SMC(ixCG_THERMAL_CTRL));
6379         dev_info(adev->dev, "  GENERAL_PWRMGT=0x%08X\n",
6380                  RREG32_SMC(ixGENERAL_PWRMGT));
6381         dev_info(adev->dev, "  MC_SEQ_CNTL_3=0x%08X\n",
6382                  RREG32(mmMC_SEQ_CNTL_3));
6383         dev_info(adev->dev, "  LCAC_MC0_CNTL=0x%08X\n",
6384                  RREG32_SMC(ixLCAC_MC0_CNTL));
6385         dev_info(adev->dev, "  LCAC_MC1_CNTL=0x%08X\n",
6386                  RREG32_SMC(ixLCAC_MC1_CNTL));
6387         dev_info(adev->dev, "  LCAC_CPL_CNTL=0x%08X\n",
6388                  RREG32_SMC(ixLCAC_CPL_CNTL));
6389         dev_info(adev->dev, "  SCLK_PWRMGT_CNTL=0x%08X\n",
6390                  RREG32_SMC(ixSCLK_PWRMGT_CNTL));
6391         dev_info(adev->dev, "  BIF_LNCNT_RESET=0x%08X\n",
6392                  RREG32(mmBIF_LNCNT_RESET));
6393         dev_info(adev->dev, "  FIRMWARE_FLAGS=0x%08X\n",
6394                  RREG32_SMC(ixFIRMWARE_FLAGS));
6395         dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL=0x%08X\n",
6396                  RREG32_SMC(ixCG_SPLL_FUNC_CNTL));
6397         dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_2=0x%08X\n",
6398                  RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2));
6399         dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_3=0x%08X\n",
6400                  RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3));
6401         dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_4=0x%08X\n",
6402                  RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4));
6403         dev_info(adev->dev, "  CG_SPLL_SPREAD_SPECTRUM=0x%08X\n",
6404                  RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM));
6405         dev_info(adev->dev, "  CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n",
6406                  RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2));
6407         dev_info(adev->dev, "  DLL_CNTL=0x%08X\n",
6408                  RREG32(mmDLL_CNTL));
6409         dev_info(adev->dev, "  MCLK_PWRMGT_CNTL=0x%08X\n",
6410                  RREG32(mmMCLK_PWRMGT_CNTL));
6411         dev_info(adev->dev, "  MPLL_AD_FUNC_CNTL=0x%08X\n",
6412                  RREG32(mmMPLL_AD_FUNC_CNTL));
6413         dev_info(adev->dev, "  MPLL_DQ_FUNC_CNTL=0x%08X\n",
6414                  RREG32(mmMPLL_DQ_FUNC_CNTL));
6415         dev_info(adev->dev, "  MPLL_FUNC_CNTL=0x%08X\n",
6416                  RREG32(mmMPLL_FUNC_CNTL));
6417         dev_info(adev->dev, "  MPLL_FUNC_CNTL_1=0x%08X\n",
6418                  RREG32(mmMPLL_FUNC_CNTL_1));
6419         dev_info(adev->dev, "  MPLL_FUNC_CNTL_2=0x%08X\n",
6420                  RREG32(mmMPLL_FUNC_CNTL_2));
6421         dev_info(adev->dev, "  MPLL_SS1=0x%08X\n",
6422                  RREG32(mmMPLL_SS1));
6423         dev_info(adev->dev, "  MPLL_SS2=0x%08X\n",
6424                  RREG32(mmMPLL_SS2));
6425         dev_info(adev->dev, "  CG_DISPLAY_GAP_CNTL=0x%08X\n",
6426                  RREG32_SMC(ixCG_DISPLAY_GAP_CNTL));
6427         dev_info(adev->dev, "  CG_DISPLAY_GAP_CNTL2=0x%08X\n",
6428                  RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2));
6429         dev_info(adev->dev, "  CG_STATIC_SCREEN_PARAMETER=0x%08X\n",
6430                  RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER));
6431         dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_0=0x%08X\n",
6432                  RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
6433         dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_1=0x%08X\n",
6434                  RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1));
6435         dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_2=0x%08X\n",
6436                  RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2));
6437         dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_3=0x%08X\n",
6438                  RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3));
6439         dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_4=0x%08X\n",
6440                  RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4));
6441         dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_5=0x%08X\n",
6442                  RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5));
6443         dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_6=0x%08X\n",
6444                  RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6));
6445         dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_7=0x%08X\n",
6446                  RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7));
6447         dev_info(adev->dev, "  RCU_UC_EVENTS=0x%08X\n",
6448                  RREG32_SMC(ixRCU_UC_EVENTS));
6449         dev_info(adev->dev, "  DPM_TABLE_475=0x%08X\n",
6450                  RREG32_SMC(ixDPM_TABLE_475));
6451         dev_info(adev->dev, "  MC_SEQ_RAS_TIMING_LP=0x%08X\n",
6452                  RREG32(mmMC_SEQ_RAS_TIMING_LP));
6453         dev_info(adev->dev, "  MC_SEQ_RAS_TIMING=0x%08X\n",
6454                  RREG32(mmMC_SEQ_RAS_TIMING));
6455         dev_info(adev->dev, "  MC_SEQ_CAS_TIMING_LP=0x%08X\n",
6456                  RREG32(mmMC_SEQ_CAS_TIMING_LP));
6457         dev_info(adev->dev, "  MC_SEQ_CAS_TIMING=0x%08X\n",
6458                  RREG32(mmMC_SEQ_CAS_TIMING));
6459         dev_info(adev->dev, "  MC_SEQ_DLL_STBY_LP=0x%08X\n",
6460                  RREG32(mmMC_SEQ_DLL_STBY_LP));
6461         dev_info(adev->dev, "  MC_SEQ_DLL_STBY=0x%08X\n",
6462                  RREG32(mmMC_SEQ_DLL_STBY));
6463         dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD0_LP=0x%08X\n",
6464                  RREG32(mmMC_SEQ_G5PDX_CMD0_LP));
6465         dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD0=0x%08X\n",
6466                  RREG32(mmMC_SEQ_G5PDX_CMD0));
6467         dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD1_LP=0x%08X\n",
6468                  RREG32(mmMC_SEQ_G5PDX_CMD1_LP));
6469         dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD1=0x%08X\n",
6470                  RREG32(mmMC_SEQ_G5PDX_CMD1));
6471         dev_info(adev->dev, "  MC_SEQ_G5PDX_CTRL_LP=0x%08X\n",
6472                  RREG32(mmMC_SEQ_G5PDX_CTRL_LP));
6473         dev_info(adev->dev, "  MC_SEQ_G5PDX_CTRL=0x%08X\n",
6474                  RREG32(mmMC_SEQ_G5PDX_CTRL));
6475         dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n",
6476                  RREG32(mmMC_SEQ_PMG_DVS_CMD_LP));
6477         dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CMD=0x%08X\n",
6478                  RREG32(mmMC_SEQ_PMG_DVS_CMD));
6479         dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n",
6480                  RREG32(mmMC_SEQ_PMG_DVS_CTL_LP));
6481         dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CTL=0x%08X\n",
6482                  RREG32(mmMC_SEQ_PMG_DVS_CTL));
6483         dev_info(adev->dev, "  MC_SEQ_MISC_TIMING_LP=0x%08X\n",
6484                  RREG32(mmMC_SEQ_MISC_TIMING_LP));
6485         dev_info(adev->dev, "  MC_SEQ_MISC_TIMING=0x%08X\n",
6486                  RREG32(mmMC_SEQ_MISC_TIMING));
6487         dev_info(adev->dev, "  MC_SEQ_MISC_TIMING2_LP=0x%08X\n",
6488                  RREG32(mmMC_SEQ_MISC_TIMING2_LP));
6489         dev_info(adev->dev, "  MC_SEQ_MISC_TIMING2=0x%08X\n",
6490                  RREG32(mmMC_SEQ_MISC_TIMING2));
6491         dev_info(adev->dev, "  MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n",
6492                  RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP));
6493         dev_info(adev->dev, "  MC_PMG_CMD_EMRS=0x%08X\n",
6494                  RREG32(mmMC_PMG_CMD_EMRS));
6495         dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n",
6496                  RREG32(mmMC_SEQ_PMG_CMD_MRS_LP));
6497         dev_info(adev->dev, "  MC_PMG_CMD_MRS=0x%08X\n",
6498                  RREG32(mmMC_PMG_CMD_MRS));
6499         dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n",
6500                  RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP));
6501         dev_info(adev->dev, "  MC_PMG_CMD_MRS1=0x%08X\n",
6502                  RREG32(mmMC_PMG_CMD_MRS1));
6503         dev_info(adev->dev, "  MC_SEQ_WR_CTL_D0_LP=0x%08X\n",
6504                  RREG32(mmMC_SEQ_WR_CTL_D0_LP));
6505         dev_info(adev->dev, "  MC_SEQ_WR_CTL_D0=0x%08X\n",
6506                  RREG32(mmMC_SEQ_WR_CTL_D0));
6507         dev_info(adev->dev, "  MC_SEQ_WR_CTL_D1_LP=0x%08X\n",
6508                  RREG32(mmMC_SEQ_WR_CTL_D1_LP));
6509         dev_info(adev->dev, "  MC_SEQ_WR_CTL_D1=0x%08X\n",
6510                  RREG32(mmMC_SEQ_WR_CTL_D1));
6511         dev_info(adev->dev, "  MC_SEQ_RD_CTL_D0_LP=0x%08X\n",
6512                  RREG32(mmMC_SEQ_RD_CTL_D0_LP));
6513         dev_info(adev->dev, "  MC_SEQ_RD_CTL_D0=0x%08X\n",
6514                  RREG32(mmMC_SEQ_RD_CTL_D0));
6515         dev_info(adev->dev, "  MC_SEQ_RD_CTL_D1_LP=0x%08X\n",
6516                  RREG32(mmMC_SEQ_RD_CTL_D1_LP));
6517         dev_info(adev->dev, "  MC_SEQ_RD_CTL_D1=0x%08X\n",
6518                  RREG32(mmMC_SEQ_RD_CTL_D1));
6519         dev_info(adev->dev, "  MC_SEQ_PMG_TIMING_LP=0x%08X\n",
6520                  RREG32(mmMC_SEQ_PMG_TIMING_LP));
6521         dev_info(adev->dev, "  MC_SEQ_PMG_TIMING=0x%08X\n",
6522                  RREG32(mmMC_SEQ_PMG_TIMING));
6523         dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n",
6524                  RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP));
6525         dev_info(adev->dev, "  MC_PMG_CMD_MRS2=0x%08X\n",
6526                  RREG32(mmMC_PMG_CMD_MRS2));
6527         dev_info(adev->dev, "  MC_SEQ_WR_CTL_2_LP=0x%08X\n",
6528                  RREG32(mmMC_SEQ_WR_CTL_2_LP));
6529         dev_info(adev->dev, "  MC_SEQ_WR_CTL_2=0x%08X\n",
6530                  RREG32(mmMC_SEQ_WR_CTL_2));
6531         dev_info(adev->dev, "  PCIE_LC_SPEED_CNTL=0x%08X\n",
6532                  RREG32_PCIE(ixPCIE_LC_SPEED_CNTL));
6533         dev_info(adev->dev, "  PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n",
6534                  RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL));
6535         dev_info(adev->dev, "  SMC_IND_INDEX_0=0x%08X\n",
6536                  RREG32(mmSMC_IND_INDEX_0));
6537         dev_info(adev->dev, "  SMC_IND_DATA_0=0x%08X\n",
6538                  RREG32(mmSMC_IND_DATA_0));
6539         dev_info(adev->dev, "  SMC_IND_ACCESS_CNTL=0x%08X\n",
6540                  RREG32(mmSMC_IND_ACCESS_CNTL));
6541         dev_info(adev->dev, "  SMC_RESP_0=0x%08X\n",
6542                  RREG32(mmSMC_RESP_0));
6543         dev_info(adev->dev, "  SMC_MESSAGE_0=0x%08X\n",
6544                  RREG32(mmSMC_MESSAGE_0));
6545         dev_info(adev->dev, "  SMC_SYSCON_RESET_CNTL=0x%08X\n",
6546                  RREG32_SMC(ixSMC_SYSCON_RESET_CNTL));
6547         dev_info(adev->dev, "  SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n",
6548                  RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0));
6549         dev_info(adev->dev, "  SMC_SYSCON_MISC_CNTL=0x%08X\n",
6550                  RREG32_SMC(ixSMC_SYSCON_MISC_CNTL));
6551         dev_info(adev->dev, "  SMC_PC_C=0x%08X\n",
6552                  RREG32_SMC(ixSMC_PC_C));
6553 }
6554
6555 static int ci_dpm_soft_reset(void *handle)
6556 {
6557         return 0;
6558 }
6559
6560 static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6561                                       struct amdgpu_irq_src *source,
6562                                       unsigned type,
6563                                       enum amdgpu_interrupt_state state)
6564 {
6565         u32 cg_thermal_int;
6566
6567         switch (type) {
6568         case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
6569                 switch (state) {
6570                 case AMDGPU_IRQ_STATE_DISABLE:
6571                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6572                         cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6573                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6574                         break;
6575                 case AMDGPU_IRQ_STATE_ENABLE:
6576                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6577                         cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6578                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6579                         break;
6580                 default:
6581                         break;
6582                 }
6583                 break;
6584
6585         case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
6586                 switch (state) {
6587                 case AMDGPU_IRQ_STATE_DISABLE:
6588                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6589                         cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6590                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6591                         break;
6592                 case AMDGPU_IRQ_STATE_ENABLE:
6593                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6594                         cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6595                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6596                         break;
6597                 default:
6598                         break;
6599                 }
6600                 break;
6601
6602         default:
6603                 break;
6604         }
6605         return 0;
6606 }
6607
6608 static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
6609                                     struct amdgpu_irq_src *source, 
6610                                     struct amdgpu_iv_entry *entry)
6611 {
6612         bool queue_thermal = false;
6613
6614         if (entry == NULL)
6615                 return -EINVAL;
6616
6617         switch (entry->src_id) {
6618         case 230: /* thermal low to high */
6619                 DRM_DEBUG("IH: thermal low to high\n");
6620                 adev->pm.dpm.thermal.high_to_low = false;
6621                 queue_thermal = true;
6622                 break;
6623         case 231: /* thermal high to low */
6624                 DRM_DEBUG("IH: thermal high to low\n");
6625                 adev->pm.dpm.thermal.high_to_low = true;
6626                 queue_thermal = true;
6627                 break;
6628         default:
6629                 break;
6630         }
6631
6632         if (queue_thermal)
6633                 schedule_work(&adev->pm.dpm.thermal.work);
6634
6635         return 0;
6636 }
6637
6638 static int ci_dpm_set_clockgating_state(void *handle,
6639                                           enum amd_clockgating_state state)
6640 {
6641         return 0;
6642 }
6643
6644 static int ci_dpm_set_powergating_state(void *handle,
6645                                           enum amd_powergating_state state)
6646 {
6647         return 0;
6648 }
6649
6650 const struct amd_ip_funcs ci_dpm_ip_funcs = {
6651         .early_init = ci_dpm_early_init,
6652         .late_init = ci_dpm_late_init,
6653         .sw_init = ci_dpm_sw_init,
6654         .sw_fini = ci_dpm_sw_fini,
6655         .hw_init = ci_dpm_hw_init,
6656         .hw_fini = ci_dpm_hw_fini,
6657         .suspend = ci_dpm_suspend,
6658         .resume = ci_dpm_resume,
6659         .is_idle = ci_dpm_is_idle,
6660         .wait_for_idle = ci_dpm_wait_for_idle,
6661         .soft_reset = ci_dpm_soft_reset,
6662         .print_status = ci_dpm_print_status,
6663         .set_clockgating_state = ci_dpm_set_clockgating_state,
6664         .set_powergating_state = ci_dpm_set_powergating_state,
6665 };
6666
6667 static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
6668         .get_temperature = &ci_dpm_get_temp,
6669         .pre_set_power_state = &ci_dpm_pre_set_power_state,
6670         .set_power_state = &ci_dpm_set_power_state,
6671         .post_set_power_state = &ci_dpm_post_set_power_state,
6672         .display_configuration_changed = &ci_dpm_display_configuration_changed,
6673         .get_sclk = &ci_dpm_get_sclk,
6674         .get_mclk = &ci_dpm_get_mclk,
6675         .print_power_state = &ci_dpm_print_power_state,
6676         .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
6677         .force_performance_level = &ci_dpm_force_performance_level,
6678         .vblank_too_short = &ci_dpm_vblank_too_short,
6679         .powergate_uvd = &ci_dpm_powergate_uvd,
6680         .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
6681         .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
6682         .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
6683         .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
6684 };
6685
6686 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
6687 {
6688         if (adev->pm.funcs == NULL)
6689                 adev->pm.funcs = &ci_dpm_funcs;
6690 }
6691
6692 static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
6693         .set = ci_dpm_set_interrupt_state,
6694         .process = ci_dpm_process_interrupt,
6695 };
6696
6697 static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
6698 {
6699         adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
6700         adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
6701 }