]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/radeon/ci_dpm.c
arm: imx6: defconfig: update tx6 defconfigs
[karo-tx-linux.git] / drivers / gpu / drm / radeon / ci_dpm.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include "drmP.h"
25 #include "radeon.h"
26 #include "cikd.h"
27 #include "r600_dpm.h"
28 #include "ci_dpm.h"
29 #include "atom.h"
30 #include <linux/seq_file.h>
31
/* MC arbiter frequency-set indices; presumably the four arb register
 * sets switched by ni_copy_and_switch_arb_sets() — confirm in cikd.h */
#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

/* End of SMC-addressable SRAM; used as the bound for SMC reads/writes */
#define SMC_RAM_END 0x40000

/* VDDC values are scaled by VOLTAGE_SCALE before VID conversion
 * (see ci_convert_to_vid()) */
#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100
42
/* PowerTune defaults for Bonaire XT (device ids 0x6650/0x6658/0x665C and
 * the fallback case — see ci_initialize_powertune_defaults()).  Scalar
 * fields follow struct ci_pt_defaults in ci_dpm.h (SVI load line, TDC
 * limit, waterfall control, DTE ambient temp base, BAPM temp gradient —
 * exact field order per the header); the two arrays are the BAPMTI_R /
 * BAPMTI_RC matrices consumed flattened by
 * ci_populate_bapm_parameters_in_dpm_table(). */
static const struct ci_pt_defaults defaults_bonaire_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
        { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
49
/* PowerTune defaults for Bonaire Pro (device ids 0x6651/0x665D); layout
 * as for defaults_bonaire_xt. */
static const struct ci_pt_defaults defaults_bonaire_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
        { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
        { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
56
/* PowerTune defaults for Saturn XT (device id 0x6640); layout as for
 * defaults_bonaire_xt. */
static const struct ci_pt_defaults defaults_saturn_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
        { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
63
/* PowerTune defaults for Saturn Pro (device id 0x6641); layout as for
 * defaults_bonaire_xt. */
static const struct ci_pt_defaults defaults_saturn_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
        { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
        { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
70
/* DIDT (di/dt current-transient limiting) register programming table,
 * applied by ci_program_pt_config_registers() when DIDT is enabled
 * (see ci_enable_didt()).  Each entry is
 * { offset, mask, shift, value, type }; DIDT_IND means the offset is an
 * indirect DIDT register index accessed via RREG32_DIDT/WREG32_DIDT.
 *
 * The four groups at index bases 0x00, 0x20, 0x40 and 0x60 presumably
 * correspond to the SQ, DB, TD and TCP blocks whose CTRL0 enable bits
 * are toggled in ci_do_enable_didt() — confirm against cikd.h.  Within
 * each group: three weight registers zeroed, a control register, two
 * threshold registers set to 0x3FFF, and the enable register cleared
 * (actual enabling happens later in ci_do_enable_didt()). */
static const struct ci_pt_config_reg didt_config_ci[] =
{
        /* group at base 0x00: weights (all zero) */
        { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        /* control, thresholds, enable bit cleared */
        { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        /* group at base 0x20 */
        { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        /* group at base 0x40 */
        { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        /* group at base 0x60 */
        { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        /* sentinel: terminates the walk in ci_program_pt_config_registers() */
        { 0xFFFFFFFF }
};
147
/* Helpers shared with the other radeon dpm implementations
 * (defined in rv770/btc/ni/si/cik source files). */
extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
                                                            u32 *max_clock);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
                                       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
                                                     u32 max_voltage_steps,
                                                     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
                          u32 block, bool enable);

/* Forward declarations for functions defined later in this file. */
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
                                         struct atom_voltage_table_entry *voltage_table,
                                         u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
                                       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
170
171 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
172 {
173         struct ci_power_info *pi = rdev->pm.dpm.priv;
174
175         return pi;
176 }
177
178 static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
179 {
180         struct ci_ps *ps = rps->ps_priv;
181
182         return ps;
183 }
184
185 static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
186 {
187         struct ci_power_info *pi = ci_get_pi(rdev);
188
189         switch (rdev->pdev->device) {
190         case 0x6650:
191         case 0x6658:
192         case 0x665C:
193         default:
194                 pi->powertune_defaults = &defaults_bonaire_xt;
195                 break;
196         case 0x6651:
197         case 0x665D:
198                 pi->powertune_defaults = &defaults_bonaire_pro;
199                 break;
200         case 0x6640:
201                 pi->powertune_defaults = &defaults_saturn_xt;
202                 break;
203         case 0x6641:
204                 pi->powertune_defaults = &defaults_saturn_pro;
205                 break;
206         }
207
208         pi->dte_tj_offset = 0;
209
210         pi->caps_power_containment = true;
211         pi->caps_cac = false;
212         pi->caps_sq_ramping = false;
213         pi->caps_db_ramping = false;
214         pi->caps_td_ramping = false;
215         pi->caps_tcp_ramping = false;
216
217         if (pi->caps_power_containment) {
218                 pi->caps_cac = true;
219                 pi->enable_bapm_feature = true;
220                 pi->enable_tdc_limit_feature = true;
221                 pi->enable_pkg_pwr_tracking_feature = true;
222         }
223 }
224
225 static u8 ci_convert_to_vid(u16 vddc)
226 {
227         return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
228 }
229
/*
 * Fill the BapmVddC VID fuse tables from the ATOM CAC leakage table.
 * With the EVV platform cap each entry supplies three voltages
 * (vddc1/2/3 -> Lo/Hi/Hi2); otherwise the nominal vddc and the leakage
 * value are used and the Hi2 column is left untouched.
 *
 * Returns 0 on success, -EINVAL if the leakage table is missing, has
 * more than the 8 entries the SMC table can hold, or disagrees in size
 * with the vddc-vs-sclk dependency table.
 */
static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}
258
259 static int ci_populate_vddc_vid(struct radeon_device *rdev)
260 {
261         struct ci_power_info *pi = ci_get_pi(rdev);
262         u8 *vid = pi->smc_powertune_table.VddCVid;
263         u32 i;
264
265         if (pi->vddc_voltage_table.count > 8)
266                 return -EINVAL;
267
268         for (i = 0; i < pi->vddc_voltage_table.count; i++)
269                 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
270
271         return 0;
272 }
273
274 static int ci_populate_svi_load_line(struct radeon_device *rdev)
275 {
276         struct ci_power_info *pi = ci_get_pi(rdev);
277         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
278
279         pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
280         pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
281         pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
282         pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
283
284         return 0;
285 }
286
287 static int ci_populate_tdc_limit(struct radeon_device *rdev)
288 {
289         struct ci_power_info *pi = ci_get_pi(rdev);
290         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
291         u16 tdc_limit;
292
293         tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
294         pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
295         pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
296                 pt_defaults->tdc_vddc_throttle_release_limit_perc;
297         pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
298
299         return 0;
300 }
301
/*
 * ci_populate_dw8 - set the TdcWaterfallCtl fuse field.
 *
 * Reads TdcWaterfallCtl from the SMC SRAM fuse table first; on success
 * the value just read is immediately overwritten with the driver
 * default.  NOTE(review): the read result is discarded — presumably the
 * read only serves to verify the fuse table location is accessible;
 * confirm the intent before changing.
 *
 * Returns 0 on success, -EINVAL if the SMC SRAM read fails.
 */
static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}
321
/*
 * Derive the min and max VID across the BapmVddC Hi/Lo SIDD tables
 * (filled earlier by ci_populate_bapm_vddc_vid_sidd()) and store them
 * as the GNB LPML max/min VID fuses.  Zero entries are treated as
 * unused and skipped.
 *
 * NOTE(review): min/max are seeded from hi_vid[0] before the loop; if
 * hi_vid[0] were 0 while other entries are non-zero, min would stay 0
 * and the function would return -EINVAL despite usable data — confirm
 * whether a zero hi_vid[0] can occur in practice.
 *
 * Returns 0 on success, -EINVAL when no non-zero VID was found.
 */
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}
353
354 static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
355 {
356         struct ci_power_info *pi = ci_get_pi(rdev);
357         u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
358         u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
359         struct radeon_cac_tdp_table *cac_tdp_table =
360                 rdev->pm.dpm.dyn_state.cac_tdp_table;
361
362         hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
363         lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
364
365         pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
366         pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
367
368         return 0;
369 }
370
/*
 * Fill the BAPM-related fields of the SMC dpm table: TDP targets
 * (scaled by 256, SMC fixed point), thermal limits, optional platform
 * power-management (PPM) limits, the BAPM temperature gradient, and the
 * BAPMTI_R / BAPMTI_RC coupling matrices.
 *
 * The default matrices are flat arrays of
 * SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS entries,
 * walked with def1/def2 in the same order as the nested loops; each
 * value is byte-swapped to big-endian for the SMC.
 *
 * Always returns 0.
 */
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	/* temperature_high is in millidegrees; SMC wants whole degrees */
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	/* PPM limits only apply when the platform provides a ppm table */
	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
418
/*
 * ci_populate_pm_base - build the complete PmFuses image and upload it
 * to the SMC.
 *
 * When power containment is enabled: look up the fuse-table location in
 * the SMC firmware header, fill each fuse sub-table in turn (VID
 * tables, SVI load line, TDC limit, waterfall control, GNB LPML
 * min/max, base leakage), then copy the whole SMU7_Discrete_PmFuses
 * struct into SMC SRAM.  Any step failing aborts with its error code.
 * With power containment disabled this is a successful no-op.
 */
static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}
462
463 static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
464 {
465         struct ci_power_info *pi = ci_get_pi(rdev);
466         u32 data;
467
468         if (pi->caps_sq_ramping) {
469                 data = RREG32_DIDT(DIDT_SQ_CTRL0);
470                 if (enable)
471                         data |= DIDT_CTRL_EN;
472                 else
473                         data &= ~DIDT_CTRL_EN;
474                 WREG32_DIDT(DIDT_SQ_CTRL0, data);
475         }
476
477         if (pi->caps_db_ramping) {
478                 data = RREG32_DIDT(DIDT_DB_CTRL0);
479                 if (enable)
480                         data |= DIDT_CTRL_EN;
481                 else
482                         data &= ~DIDT_CTRL_EN;
483                 WREG32_DIDT(DIDT_DB_CTRL0, data);
484         }
485
486         if (pi->caps_td_ramping) {
487                 data = RREG32_DIDT(DIDT_TD_CTRL0);
488                 if (enable)
489                         data |= DIDT_CTRL_EN;
490                 else
491                         data &= ~DIDT_CTRL_EN;
492                 WREG32_DIDT(DIDT_TD_CTRL0, data);
493         }
494
495         if (pi->caps_tcp_ramping) {
496                 data = RREG32_DIDT(DIDT_TCP_CTRL0);
497                 if (enable)
498                         data |= DIDT_CTRL_EN;
499                 else
500                         data &= ~DIDT_CTRL_EN;
501                 WREG32_DIDT(DIDT_TCP_CTRL0, data);
502         }
503 }
504
/*
 * ci_program_pt_config_registers - apply a PowerTune register program.
 *
 * Walks the table until the 0xFFFFFFFF sentinel offset.  CACHE-type
 * entries do not touch hardware: their shifted values accumulate in
 * 'cache' and are OR'd into the next non-cache write, after which the
 * accumulator resets — so cache entries must immediately precede the
 * register they modify.  Non-cache entries are read-modify-write
 * through the access method selected by 'type' (SMC indirect, DIDT
 * indirect, or plain MMIO; the MMIO offset is a dword index, hence the
 * << 2).
 *
 * Returns 0 on success, -EINVAL for a NULL table.
 */
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}
552
553 static int ci_enable_didt(struct radeon_device *rdev, bool enable)
554 {
555         struct ci_power_info *pi = ci_get_pi(rdev);
556         int ret;
557
558         if (pi->caps_sq_ramping || pi->caps_db_ramping ||
559             pi->caps_td_ramping || pi->caps_tcp_ramping) {
560                 cik_enter_rlc_safe_mode(rdev);
561
562                 if (enable) {
563                         ret = ci_program_pt_config_registers(rdev, didt_config_ci);
564                         if (ret) {
565                                 cik_exit_rlc_safe_mode(rdev);
566                                 return ret;
567                         }
568                 }
569
570                 ci_do_enable_didt(rdev, enable);
571
572                 cik_exit_rlc_safe_mode(rdev);
573         }
574
575         return 0;
576 }
577
/*
 * ci_enable_power_containment - turn the power-containment feature set
 * on or off via SMC messages.
 *
 * Enable path: for each sub-feature flagged in the power info (BAPM/
 * DTE, TDC limit, package power tracking) send the enable message and
 * record success in power_containment_features; a failed message sets
 * -EINVAL but the remaining features are still attempted.  Enabling
 * package power tracking also programs the default power limit from
 * the CAC TDP table (scaled by 256, SMC fixed point).
 *
 * Disable path: send the disable message for each recorded feature
 * (results ignored) and clear the feature mask.
 *
 * Returns 0 on success, -EINVAL if any enable message failed.
 */
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}
635
636 static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
637 {
638         struct ci_power_info *pi = ci_get_pi(rdev);
639         PPSMC_Result smc_result;
640         int ret = 0;
641
642         if (pi->caps_cac) {
643                 if (enable) {
644                         smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
645                         if (smc_result != PPSMC_Result_OK) {
646                                 ret = -EINVAL;
647                                 pi->cac_enabled = false;
648                         } else {
649                                 pi->cac_enabled = true;
650                         }
651                 } else if (pi->cac_enabled) {
652                         ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
653                         pi->cac_enabled = false;
654                 }
655         }
656
657         return ret;
658 }
659
/*
 * ci_power_control_set_level - re-apply the TDP target after a user
 * tdp_adjustment change.
 *
 * Only acts when power containment and the BAPM feature are active.
 * The target is the configurable TDP adjusted by tdp_adjustment percent
 * and scaled by 256 (SMC fixed point), sent as the overdrive target.
 *
 * Returns 0 on success or the error from ci_set_overdrive_target_tdp().
 */
static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* NOTE(review): polarity semantics unconfirmed — the adjustment is currently always applied as a reduction; verify intent */

	if (pi->caps_power_containment &&
	    (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;
		target_tdp *= 256;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}
683
684 void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
685 {
686         struct ci_power_info *pi = ci_get_pi(rdev);
687
688         if (pi->uvd_power_gated == gate)
689                 return;
690
691         pi->uvd_power_gated = gate;
692
693         ci_update_uvd_dpm(rdev, gate);
694 }
695
696 bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
697 {
698         struct ci_power_info *pi = ci_get_pi(rdev);
699         u32 vblank_time = r600_dpm_get_vblank_time(rdev);
700         u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
701
702         if (vblank_time < switch_limit)
703                 return true;
704         else
705                 return false;
706
707 }
708
/* Clamp and massage the requested power state so every performance level
 * fits within the AC/DC limits and the voltage dependency tables, and
 * force a fixed mclk when mclk switching is unsafe (multiple CRTCs or a
 * vblank period too short to hide the switch).
 */
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
                                        struct radeon_ps *rps)
{
        struct ci_ps *ps = ci_get_ps(rps);
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct radeon_clock_and_voltage_limits *max_limits;
        bool disable_mclk_switching;
        u32 sclk, mclk;
        u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
        int i;

        /* mclk switching mid-scanout is only safe with a single active
         * CRTC and a long enough vblank */
        if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
            ci_dpm_vblank_too_short(rdev))
                disable_mclk_switching = true;
        else
                disable_mclk_switching = false;

        if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
                pi->battery_state = true;
        else
                pi->battery_state = false;

        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        /* on DC power, cap every level to the DC limits */
        if (rdev->pm.dpm.ac_power == false) {
                for (i = 0; i < ps->performance_level_count; i++) {
                        if (ps->performance_levels[i].mclk > max_limits->mclk)
                                ps->performance_levels[i].mclk = max_limits->mclk;
                        if (ps->performance_levels[i].sclk > max_limits->sclk)
                                ps->performance_levels[i].sclk = max_limits->sclk;
                }
        }

        /* limit clocks to max supported clocks based on voltage dependency tables */
        btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
                                                        &max_sclk_vddc);
        btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
                                                        &max_mclk_vddci);
        btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
                                                        &max_mclk_vddc);

        /* a max of 0 means the table imposed no limit for that rail */
        for (i = 0; i < ps->performance_level_count; i++) {
                if (max_sclk_vddc) {
                        if (ps->performance_levels[i].sclk > max_sclk_vddc)
                                ps->performance_levels[i].sclk = max_sclk_vddc;
                }
                if (max_mclk_vddci) {
                        if (ps->performance_levels[i].mclk > max_mclk_vddci)
                                ps->performance_levels[i].mclk = max_mclk_vddci;
                }
                if (max_mclk_vddc) {
                        if (ps->performance_levels[i].mclk > max_mclk_vddc)
                                ps->performance_levels[i].mclk = max_mclk_vddc;
                }
        }

        /* XXX validate the min clocks required for display */

        /* when mclk switching is disabled, pin level 0's mclk to the
         * highest level's mclk so the memory clock never changes */
        if (disable_mclk_switching) {
                mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
                sclk = ps->performance_levels[0].sclk;
        } else {
                mclk = ps->performance_levels[0].mclk;
                sclk = ps->performance_levels[0].sclk;
        }

        ps->performance_levels[0].sclk = sclk;
        ps->performance_levels[0].mclk = mclk;

        /* keep the levels monotonic: level 1 must not be slower than level 0 */
        if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
                ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

        if (disable_mclk_switching) {
                if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
                        ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
        } else {
                if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
                        ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
        }
}
792
/* Program the thermal interrupt thresholds.  min_temp/max_temp are in
 * millidegrees C; the hardware fields take whole degrees.  The requested
 * range is intersected with the hardware-representable 0..255 C window.
 * Returns 0 on success, -EINVAL if the resulting range is inverted.
 */
static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
                                            int min_temp, int max_temp)
{
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;
        u32 tmp;

        if (low_temp < min_temp)
                low_temp = min_temp;
        if (high_temp > max_temp)
                high_temp = max_temp;
        if (high_temp < low_temp) {
                DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
                return -EINVAL;
        }

        /* set the high/low digital thermal interrupt thresholds (degrees C) */
        tmp = RREG32_SMC(CG_THERMAL_INT);
        tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
        tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
                CI_DIG_THERM_INTL(low_temp / 1000);
        WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
        /* XXX: need to figure out how to handle this properly */
        tmp = RREG32_SMC(CG_THERMAL_CTRL);
        tmp &= DIG_THERM_DPM_MASK;
        tmp |= DIG_THERM_DPM(high_temp / 1000);
        WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

        return 0;
}
825
#if 0
/* Read a dword "soft register" from the SMC SRAM soft-register block.
 * Currently unused; kept (disabled) as the read-side counterpart of
 * ci_write_smc_soft_register().
 */
static int ci_read_smc_soft_register(struct radeon_device *rdev,
                                     u16 reg_offset, u32 *value)
{
        struct ci_power_info *pi = ci_get_pi(rdev);

        return ci_read_smc_sram_dword(rdev,
                                      pi->soft_regs_start + reg_offset,
                                      value, pi->sram_end);
}
#endif
837
838 static int ci_write_smc_soft_register(struct radeon_device *rdev,
839                                       u16 reg_offset, u32 value)
840 {
841         struct ci_power_info *pi = ci_get_pi(rdev);
842
843         return ci_write_smc_sram_dword(rdev,
844                                        pi->soft_regs_start + reg_offset,
845                                        value, pi->sram_end);
846 }
847
848 static void ci_init_fps_limits(struct radeon_device *rdev)
849 {
850         struct ci_power_info *pi = ci_get_pi(rdev);
851         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
852
853         if (pi->caps_fps) {
854                 u16 tmp;
855
856                 tmp = 45;
857                 table->FpsHighT = cpu_to_be16(tmp);
858
859                 tmp = 30;
860                 table->FpsLowT = cpu_to_be16(tmp);
861         }
862 }
863
/* Push the cached low-sclk interrupt threshold into the SMC's DPM table,
 * if the low-sclk-throttle notification feature is enabled.
 */
static int ci_update_sclk_t(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        int ret = 0;
        u32 low_sclk_interrupt_t = 0;

        if (pi->caps_sclk_throttle_low_notification) {
                /* SMC expects big-endian */
                low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

                ret = ci_copy_bytes_to_smc(rdev,
                                           pi->dpm_table_start +
                                           offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
                                           (u8 *)&low_sclk_interrupt_t,
                                           sizeof(u32), pi->sram_end);

        }

        return ret;
}
883
/* Build the vddc/vddci leakage tables by probing the vbios for each
 * virtual (leakage) voltage id.  Entries are only recorded when the
 * vbios returns a real voltage that differs from the virtual id itself.
 */
static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        u16 leakage_id, virtual_voltage_id;
        u16 vddc, vddci;
        int i;

        pi->vddc_leakage.count = 0;
        pi->vddci_leakage.count = 0;

        if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
                for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
                        virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
                        if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
                                                                                 virtual_voltage_id,
                                                                                 leakage_id) == 0) {
                                /* 0 or an echo of the virtual id means "no data" */
                                if (vddc != 0 && vddc != virtual_voltage_id) {
                                        pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
                                        pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
                                        pi->vddc_leakage.count++;
                                }
                                if (vddci != 0 && vddci != virtual_voltage_id) {
                                        pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
                                        pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
                                        pi->vddci_leakage.count++;
                                }
                        }
                }
        }
}
914
915 static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
916 {
917         struct ci_power_info *pi = ci_get_pi(rdev);
918         bool want_thermal_protection;
919         enum radeon_dpm_event_src dpm_event_src;
920         u32 tmp;
921
922         switch (sources) {
923         case 0:
924         default:
925                 want_thermal_protection = false;
926                 break;
927         case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
928                 want_thermal_protection = true;
929                 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
930                 break;
931         case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
932                 want_thermal_protection = true;
933                 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
934                 break;
935         case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
936               (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
937                 want_thermal_protection = true;
938                 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
939                 break;
940         }
941
942         if (want_thermal_protection) {
943 #if 0
944                 /* XXX: need to figure out how to handle this properly */
945                 tmp = RREG32_SMC(CG_THERMAL_CTRL);
946                 tmp &= DPM_EVENT_SRC_MASK;
947                 tmp |= DPM_EVENT_SRC(dpm_event_src);
948                 WREG32_SMC(CG_THERMAL_CTRL, tmp);
949 #endif
950
951                 tmp = RREG32_SMC(GENERAL_PWRMGT);
952                 if (pi->thermal_protection)
953                         tmp &= ~THERMAL_PROTECTION_DIS;
954                 else
955                         tmp |= THERMAL_PROTECTION_DIS;
956                 WREG32_SMC(GENERAL_PWRMGT, tmp);
957         } else {
958                 tmp = RREG32_SMC(GENERAL_PWRMGT);
959                 tmp |= THERMAL_PROTECTION_DIS;
960                 WREG32_SMC(GENERAL_PWRMGT, tmp);
961         }
962 }
963
964 static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
965                                            enum radeon_dpm_auto_throttle_src source,
966                                            bool enable)
967 {
968         struct ci_power_info *pi = ci_get_pi(rdev);
969
970         if (enable) {
971                 if (!(pi->active_auto_throttle_sources & (1 << source))) {
972                         pi->active_auto_throttle_sources |= 1 << source;
973                         ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
974                 }
975         } else {
976                 if (pi->active_auto_throttle_sources & (1 << source)) {
977                         pi->active_auto_throttle_sources &= ~(1 << source);
978                         ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
979                 }
980         }
981 }
982
983 static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
984 {
985         if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
986                 ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
987 }
988
989 static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
990 {
991         struct ci_power_info *pi = ci_get_pi(rdev);
992         PPSMC_Result smc_result;
993
994         if (!pi->need_update_smu7_dpm_table)
995                 return 0;
996
997         if ((!pi->sclk_dpm_key_disabled) &&
998             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
999                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1000                 if (smc_result != PPSMC_Result_OK)
1001                         return -EINVAL;
1002         }
1003
1004         if ((!pi->mclk_dpm_key_disabled) &&
1005             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1006                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1007                 if (smc_result != PPSMC_Result_OK)
1008                         return -EINVAL;
1009         }
1010
1011         pi->need_update_smu7_dpm_table = 0;
1012         return 0;
1013 }
1014
/* Enable or disable sclk and mclk DPM via SMC messages, honouring the
 * per-domain key-disable flags.  On the mclk enable path the memory CAC
 * (leakage accounting) blocks are also brought up.  Returns -EINVAL on
 * any SMC failure.
 */
static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        PPSMC_Result smc_result;

        if (enable) {
                if (!pi->sclk_dpm_key_disabled) {
                        smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
                        if (smc_result != PPSMC_Result_OK)
                                return -EINVAL;
                }

                if (!pi->mclk_dpm_key_disabled) {
                        smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
                        if (smc_result != PPSMC_Result_OK)
                                return -EINVAL;

                        WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

                        /* two-stage LCAC bring-up with a settle delay;
                         * magic values — presumably from AMD register specs,
                         * TODO confirm against hw docs */
                        WREG32_SMC(LCAC_MC0_CNTL, 0x05);
                        WREG32_SMC(LCAC_MC1_CNTL, 0x05);
                        WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

                        udelay(10);

                        WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
                        WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
                        WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
                }
        } else {
                if (!pi->sclk_dpm_key_disabled) {
                        smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
                        if (smc_result != PPSMC_Result_OK)
                                return -EINVAL;
                }

                if (!pi->mclk_dpm_key_disabled) {
                        smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
                        if (smc_result != PPSMC_Result_OK)
                                return -EINVAL;
                }
        }

        return 0;
}
1060
/* Bring up dynamic power management: enable the global and sclk power
 * management bits, enable voltage control in the SMC, then start
 * sclk/mclk DPM and (optionally) PCIe DPM.
 */
static int ci_start_dpm(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        PPSMC_Result smc_result;
        int ret;
        u32 tmp;

        tmp = RREG32_SMC(GENERAL_PWRMGT);
        tmp |= GLOBAL_PWRMGT_EN;
        WREG32_SMC(GENERAL_PWRMGT, tmp);

        tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
        tmp |= DYNAMIC_PM_EN;
        WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

        /* give voltage transitions a generous timeout */
        ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

        WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

        smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
        if (smc_result != PPSMC_Result_OK)
                return -EINVAL;

        ret = ci_enable_sclk_mclk_dpm(rdev, true);
        if (ret)
                return ret;

        if (!pi->pcie_dpm_key_disabled) {
                smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }

        return 0;
}
1096
1097 static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
1098 {
1099         struct ci_power_info *pi = ci_get_pi(rdev);
1100         PPSMC_Result smc_result;
1101
1102         if (!pi->need_update_smu7_dpm_table)
1103                 return 0;
1104
1105         if ((!pi->sclk_dpm_key_disabled) &&
1106             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1107                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1108                 if (smc_result != PPSMC_Result_OK)
1109                         return -EINVAL;
1110         }
1111
1112         if ((!pi->mclk_dpm_key_disabled) &&
1113             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1114                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1115                 if (smc_result != PPSMC_Result_OK)
1116                         return -EINVAL;
1117         }
1118
1119         return 0;
1120 }
1121
1122 static int ci_stop_dpm(struct radeon_device *rdev)
1123 {
1124         struct ci_power_info *pi = ci_get_pi(rdev);
1125         PPSMC_Result smc_result;
1126         int ret;
1127         u32 tmp;
1128
1129         tmp = RREG32_SMC(GENERAL_PWRMGT);
1130         tmp &= ~GLOBAL_PWRMGT_EN;
1131         WREG32_SMC(GENERAL_PWRMGT, tmp);
1132
1133         tmp = RREG32(SCLK_PWRMGT_CNTL);
1134         tmp &= ~DYNAMIC_PM_EN;
1135         WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1136
1137         if (!pi->pcie_dpm_key_disabled) {
1138                 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
1139                 if (smc_result != PPSMC_Result_OK)
1140                         return -EINVAL;
1141         }
1142
1143         ret = ci_enable_sclk_mclk_dpm(rdev, false);
1144         if (ret)
1145                 return ret;
1146
1147         smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
1148         if (smc_result != PPSMC_Result_OK)
1149                 return -EINVAL;
1150
1151         return 0;
1152 }
1153
1154 static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1155 {
1156         u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1157
1158         if (enable)
1159                 tmp &= ~SCLK_PWRMGT_OFF;
1160         else
1161                 tmp |= SCLK_PWRMGT_OFF;
1162         WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1163 }
1164
#if 0
/* Disabled: notify the SMC of an AC<->DC power source change by
 * reprogramming the package power limit and (when automatic transitions
 * are supported) sending the matching AC/DC message.
 */
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
                                        bool ac_power)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct radeon_cac_tdp_table *cac_tdp_table =
                rdev->pm.dpm.dyn_state.cac_tdp_table;
        u32 power_limit;

        /* limits are scaled by 256 for the SMC's fixed-point format */
        if (ac_power)
                power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
        else
                power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

        ci_set_power_limit(rdev, power_limit);

        if (pi->caps_automatic_dc_transition) {
                if (ac_power)
                        ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
                else
                        ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
        }

        return 0;
}
#endif
1191
/* Send an SMC message that takes one argument: the argument register
 * must be loaded before the message itself is posted.
 */
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
                                                      PPSMC_Msg msg, u32 parameter)
{
        WREG32(SMC_MSG_ARG_0, parameter);
        return ci_send_msg_to_smc(rdev, msg);
}
1198
1199 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1200                                                         PPSMC_Msg msg, u32 *parameter)
1201 {
1202         PPSMC_Result smc_result;
1203
1204         smc_result = ci_send_msg_to_smc(rdev, msg);
1205
1206         if ((smc_result == PPSMC_Result_OK) && parameter)
1207                 *parameter = RREG32(SMC_MSG_ARG_0);
1208
1209         return smc_result;
1210 }
1211
1212 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1213 {
1214         struct ci_power_info *pi = ci_get_pi(rdev);
1215
1216         if (!pi->sclk_dpm_key_disabled) {
1217                 PPSMC_Result smc_result =
1218                         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
1219                 if (smc_result != PPSMC_Result_OK)
1220                         return -EINVAL;
1221         }
1222
1223         return 0;
1224 }
1225
1226 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1227 {
1228         struct ci_power_info *pi = ci_get_pi(rdev);
1229
1230         if (!pi->mclk_dpm_key_disabled) {
1231                 PPSMC_Result smc_result =
1232                         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
1233                 if (smc_result != PPSMC_Result_OK)
1234                         return -EINVAL;
1235         }
1236
1237         return 0;
1238 }
1239
1240 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1241 {
1242         struct ci_power_info *pi = ci_get_pi(rdev);
1243
1244         if (!pi->pcie_dpm_key_disabled) {
1245                 PPSMC_Result smc_result =
1246                         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1247                 if (smc_result != PPSMC_Result_OK)
1248                         return -EINVAL;
1249         }
1250
1251         return 0;
1252 }
1253
1254 static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1255 {
1256         struct ci_power_info *pi = ci_get_pi(rdev);
1257
1258         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1259                 PPSMC_Result smc_result =
1260                         ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1261                 if (smc_result != PPSMC_Result_OK)
1262                         return -EINVAL;
1263         }
1264
1265         return 0;
1266 }
1267
1268 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1269                                        u32 target_tdp)
1270 {
1271         PPSMC_Result smc_result =
1272                 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1273         if (smc_result != PPSMC_Result_OK)
1274                 return -EINVAL;
1275         return 0;
1276 }
1277
/* Drop back to the boot state by disabling sclk/mclk DPM. */
static int ci_set_boot_state(struct radeon_device *rdev)
{
        return ci_enable_sclk_mclk_dpm(rdev, false);
}
1282
1283 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1284 {
1285         u32 sclk_freq;
1286         PPSMC_Result smc_result =
1287                 ci_send_msg_to_smc_return_parameter(rdev,
1288                                                     PPSMC_MSG_API_GetSclkFrequency,
1289                                                     &sclk_freq);
1290         if (smc_result != PPSMC_Result_OK)
1291                 sclk_freq = 0;
1292
1293         return sclk_freq;
1294 }
1295
1296 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1297 {
1298         u32 mclk_freq;
1299         PPSMC_Result smc_result =
1300                 ci_send_msg_to_smc_return_parameter(rdev,
1301                                                     PPSMC_MSG_API_GetMclkFrequency,
1302                                                     &mclk_freq);
1303         if (smc_result != PPSMC_Result_OK)
1304                 mclk_freq = 0;
1305
1306         return mclk_freq;
1307 }
1308
/* Start the SMC microcontroller (program its entry point, start its
 * clock, release it from reset) and wait until its firmware reports
 * interrupts enabled, bounded by the device timeout.
 */
static void ci_dpm_start_smc(struct radeon_device *rdev)
{
        int i;

        ci_program_jump_on_start(rdev);
        ci_start_smc_clock(rdev);
        ci_start_smc(rdev);
        /* poll for firmware readiness; falls through silently on timeout */
        for (i = 0; i < rdev->usec_timeout; i++) {
                if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
                        break;
        }
}
1321
/* Halt the SMC: hold it in reset, then stop its clock. */
static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
        ci_reset_smc(rdev);
        ci_stop_smc_clock(rdev);
}
1327
/* Read the SMC firmware header and cache the SRAM locations of the
 * tables the driver needs: DPM table, soft registers, MC register
 * table, fan table, and MC arb/DRAM timing table.
 */
static int ci_process_firmware_header(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        u32 tmp;
        int ret;

        ret = ci_read_smc_sram_dword(rdev,
                                     SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, DpmTable),
                                     &tmp, pi->sram_end);
        if (ret)
                return ret;

        pi->dpm_table_start = tmp;

        ret = ci_read_smc_sram_dword(rdev,
                                     SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, SoftRegisters),
                                     &tmp, pi->sram_end);
        if (ret)
                return ret;

        pi->soft_regs_start = tmp;

        ret = ci_read_smc_sram_dword(rdev,
                                     SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, mcRegisterTable),
                                     &tmp, pi->sram_end);
        if (ret)
                return ret;

        pi->mc_reg_table_start = tmp;

        ret = ci_read_smc_sram_dword(rdev,
                                     SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, FanTable),
                                     &tmp, pi->sram_end);
        if (ret)
                return ret;

        pi->fan_table_start = tmp;

        ret = ci_read_smc_sram_dword(rdev,
                                     SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
                                     &tmp, pi->sram_end);
        if (ret)
                return ret;

        pi->arb_table_start = tmp;

        return 0;
}
1381
/* Snapshot the SPLL (engine PLL, SMC space) and MPLL (memory PLL,
 * direct register space) configuration so it can be restored or used
 * as the basis for later clock programming.
 */
static void ci_read_clock_registers(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);

        pi->clock_registers.cg_spll_func_cntl =
                RREG32_SMC(CG_SPLL_FUNC_CNTL);
        pi->clock_registers.cg_spll_func_cntl_2 =
                RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
        pi->clock_registers.cg_spll_func_cntl_3 =
                RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
        pi->clock_registers.cg_spll_func_cntl_4 =
                RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
        pi->clock_registers.cg_spll_spread_spectrum =
                RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
        pi->clock_registers.cg_spll_spread_spectrum_2 =
                RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
        pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
        pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
        pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
        pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
        pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
        pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
        pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
        pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
        pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
1408
/* Reset the cached low-sclk interrupt threshold; ci_update_sclk_t()
 * pushes this value to the SMC.
 */
static void ci_init_sclk_t(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);

        pi->low_sclk_interrupt_t = 0;
}
1415
1416 static void ci_enable_thermal_protection(struct radeon_device *rdev,
1417                                          bool enable)
1418 {
1419         u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1420
1421         if (enable)
1422                 tmp &= ~THERMAL_PROTECTION_DIS;
1423         else
1424                 tmp |= THERMAL_PROTECTION_DIS;
1425         WREG32_SMC(GENERAL_PWRMGT, tmp);
1426 }
1427
1428 static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1429 {
1430         u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1431
1432         tmp |= STATIC_PM_EN;
1433
1434         WREG32_SMC(GENERAL_PWRMGT, tmp);
1435 }
1436
#if 0
/* Disabled: ask the SMC to drop to its minimum-power (ULP) state and
 * give it time to get there.
 */
static int ci_enter_ulp_state(struct radeon_device *rdev)
{

        WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

        udelay(25000);

        return 0;
}

/* Disabled: resume the SMC from minimum power, then poll its response
 * register (bounded by the device timeout) for acknowledgement.
 */
static int ci_exit_ulp_state(struct radeon_device *rdev)
{
        int i;

        WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

        udelay(7000);

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (RREG32(SMC_RESP_0) == 1)
                        break;
                udelay(1000);
        }

        return 0;
}
#endif
1465
1466 static int ci_notify_smc_display_change(struct radeon_device *rdev,
1467                                         bool has_display)
1468 {
1469         PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1470
1471         return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
1472 }
1473
1474 static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1475                                       bool enable)
1476 {
1477         struct ci_power_info *pi = ci_get_pi(rdev);
1478
1479         if (enable) {
1480                 if (pi->caps_sclk_ds) {
1481                         if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1482                                 return -EINVAL;
1483                 } else {
1484                         if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1485                                 return -EINVAL;
1486                 }
1487         } else {
1488                 if (pi->caps_sclk_ds) {
1489                         if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1490                                 return -EINVAL;
1491                 }
1492         }
1493
1494         return 0;
1495 }
1496
/* Program the display-gap heuristics used by the SMC to schedule work
 * around vblank, and tell the SMC whether a single display is active.
 */
static void ci_program_display_gap(struct radeon_device *rdev)
{
        u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
        u32 pre_vbi_time_in_us;
        u32 frame_time_in_us;
        u32 ref_clock = rdev->clock.spll.reference_freq;
        u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
        u32 vblank_time = r600_dpm_get_vblank_time(rdev);

        tmp &= ~DISP_GAP_MASK;
        if (rdev->pm.dpm.new_active_crtc_count > 0)
                tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
        else
                tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
        WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

        /* fall back to sane defaults when no display timing is available */
        if (refresh_rate == 0)
                refresh_rate = 60;
        if (vblank_time == 0xffffffff)
                vblank_time = 500;
        frame_time_in_us = 1000000 / refresh_rate;
        /* NOTE(review): this subtraction can wrap if vblank_time exceeds
         * frame_time_in_us - 200; presumably real timings keep vblank well
         * below the frame time — verify against possible display modes */
        pre_vbi_time_in_us =
                frame_time_in_us - 200 - vblank_time;
        tmp = pre_vbi_time_in_us * (ref_clock / 100);

        WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
        ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
        ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));


        ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));

}
1530
1531 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
1532 {
1533         struct ci_power_info *pi = ci_get_pi(rdev);
1534         u32 tmp;
1535
1536         if (enable) {
1537                 if (pi->caps_sclk_ss_support) {
1538                         tmp = RREG32_SMC(GENERAL_PWRMGT);
1539                         tmp |= DYN_SPREAD_SPECTRUM_EN;
1540                         WREG32_SMC(GENERAL_PWRMGT, tmp);
1541                 }
1542         } else {
1543                 tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1544                 tmp &= ~SSEN;
1545                 WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
1546
1547                 tmp = RREG32_SMC(GENERAL_PWRMGT);
1548                 tmp &= ~DYN_SPREAD_SPECTRUM_EN;
1549                 WREG32_SMC(GENERAL_PWRMGT, tmp);
1550         }
1551 }
1552
/* Program the static-screen threshold unit/value to the r600 defaults. */
static void ci_program_sstp(struct radeon_device *rdev)
{
	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}
1557
1558 static void ci_enable_display_gap(struct radeon_device *rdev)
1559 {
1560         u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1561
1562         tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
1563         tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1564                 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
1565
1566         WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1567 }
1568
/*
 * Program the voltage controller: release the SCLK and busy counters
 * from reset, then load the CI default voltage-response thresholds into
 * the eight CG_FTV registers.
 */
static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	/* let the SCLK and busy counters run */
	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}
1586
/*
 * Undo ci_program_vc(): hold the SCLK and busy counters in reset and
 * zero all eight CG_FTV voltage-response threshold registers.
 */
static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	/* put the SCLK and busy counters back into reset */
	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}
1604
1605 static int ci_upload_firmware(struct radeon_device *rdev)
1606 {
1607         struct ci_power_info *pi = ci_get_pi(rdev);
1608         int i, ret;
1609
1610         for (i = 0; i < rdev->usec_timeout; i++) {
1611                 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
1612                         break;
1613         }
1614         WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
1615
1616         ci_stop_smc_clock(rdev);
1617         ci_reset_smc(rdev);
1618
1619         ret = ci_load_smc_ucode(rdev, pi->sram_end);
1620
1621         return ret;
1622
1623 }
1624
1625 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
1626                                      struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
1627                                      struct atom_voltage_table *voltage_table)
1628 {
1629         u32 i;
1630
1631         if (voltage_dependency_table == NULL)
1632                 return -EINVAL;
1633
1634         voltage_table->mask_low = 0;
1635         voltage_table->phase_delay = 0;
1636
1637         voltage_table->count = voltage_dependency_table->count;
1638         for (i = 0; i < voltage_table->count; i++) {
1639                 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
1640                 voltage_table->entries[i].smio_low = 0;
1641         }
1642
1643         return 0;
1644 }
1645
1646 static int ci_construct_voltage_tables(struct radeon_device *rdev)
1647 {
1648         struct ci_power_info *pi = ci_get_pi(rdev);
1649         int ret;
1650
1651         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1652                 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
1653                                                     VOLTAGE_OBJ_GPIO_LUT,
1654                                                     &pi->vddc_voltage_table);
1655                 if (ret)
1656                         return ret;
1657         } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1658                 ret = ci_get_svi2_voltage_table(rdev,
1659                                                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
1660                                                 &pi->vddc_voltage_table);
1661                 if (ret)
1662                         return ret;
1663         }
1664
1665         if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
1666                 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
1667                                                          &pi->vddc_voltage_table);
1668
1669         if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1670                 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
1671                                                     VOLTAGE_OBJ_GPIO_LUT,
1672                                                     &pi->vddci_voltage_table);
1673                 if (ret)
1674                         return ret;
1675         } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1676                 ret = ci_get_svi2_voltage_table(rdev,
1677                                                 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
1678                                                 &pi->vddci_voltage_table);
1679                 if (ret)
1680                         return ret;
1681         }
1682
1683         if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
1684                 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
1685                                                          &pi->vddci_voltage_table);
1686
1687         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1688                 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
1689                                                     VOLTAGE_OBJ_GPIO_LUT,
1690                                                     &pi->mvdd_voltage_table);
1691                 if (ret)
1692                         return ret;
1693         } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1694                 ret = ci_get_svi2_voltage_table(rdev,
1695                                                 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
1696                                                 &pi->mvdd_voltage_table);
1697                 if (ret)
1698                         return ret;
1699         }
1700
1701         if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
1702                 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
1703                                                          &pi->mvdd_voltage_table);
1704
1705         return 0;
1706 }
1707
1708 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1709                                           struct atom_voltage_table_entry *voltage_table,
1710                                           SMU7_Discrete_VoltageLevel *smc_voltage_table)
1711 {
1712         int ret;
1713
1714         ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1715                                             &smc_voltage_table->StdVoltageHiSidd,
1716                                             &smc_voltage_table->StdVoltageLoSidd);
1717
1718         if (ret) {
1719                 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1720                 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1721         }
1722
1723         smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1724         smc_voltage_table->StdVoltageHiSidd =
1725                 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1726         smc_voltage_table->StdVoltageLoSidd =
1727                 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1728 }
1729
1730 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1731                                       SMU7_Discrete_DpmTable *table)
1732 {
1733         struct ci_power_info *pi = ci_get_pi(rdev);
1734         unsigned int count;
1735
1736         table->VddcLevelCount = pi->vddc_voltage_table.count;
1737         for (count = 0; count < table->VddcLevelCount; count++) {
1738                 ci_populate_smc_voltage_table(rdev,
1739                                               &pi->vddc_voltage_table.entries[count],
1740                                               &table->VddcLevel[count]);
1741
1742                 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1743                         table->VddcLevel[count].Smio |=
1744                                 pi->vddc_voltage_table.entries[count].smio_low;
1745                 else
1746                         table->VddcLevel[count].Smio = 0;
1747         }
1748         table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1749
1750         return 0;
1751 }
1752
1753 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1754                                        SMU7_Discrete_DpmTable *table)
1755 {
1756         unsigned int count;
1757         struct ci_power_info *pi = ci_get_pi(rdev);
1758
1759         table->VddciLevelCount = pi->vddci_voltage_table.count;
1760         for (count = 0; count < table->VddciLevelCount; count++) {
1761                 ci_populate_smc_voltage_table(rdev,
1762                                               &pi->vddci_voltage_table.entries[count],
1763                                               &table->VddciLevel[count]);
1764
1765                 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1766                         table->VddciLevel[count].Smio |=
1767                                 pi->vddci_voltage_table.entries[count].smio_low;
1768                 else
1769                         table->VddciLevel[count].Smio = 0;
1770         }
1771         table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1772
1773         return 0;
1774 }
1775
1776 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1777                                       SMU7_Discrete_DpmTable *table)
1778 {
1779         struct ci_power_info *pi = ci_get_pi(rdev);
1780         unsigned int count;
1781
1782         table->MvddLevelCount = pi->mvdd_voltage_table.count;
1783         for (count = 0; count < table->MvddLevelCount; count++) {
1784                 ci_populate_smc_voltage_table(rdev,
1785                                               &pi->mvdd_voltage_table.entries[count],
1786                                               &table->MvddLevel[count]);
1787
1788                 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1789                         table->MvddLevel[count].Smio |=
1790                                 pi->mvdd_voltage_table.entries[count].smio_low;
1791                 else
1792                         table->MvddLevel[count].Smio = 0;
1793         }
1794         table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1795
1796         return 0;
1797 }
1798
1799 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1800                                           SMU7_Discrete_DpmTable *table)
1801 {
1802         int ret;
1803
1804         ret = ci_populate_smc_vddc_table(rdev, table);
1805         if (ret)
1806                 return ret;
1807
1808         ret = ci_populate_smc_vddci_table(rdev, table);
1809         if (ret)
1810                 return ret;
1811
1812         ret = ci_populate_smc_mvdd_table(rdev, table);
1813         if (ret)
1814                 return ret;
1815
1816         return 0;
1817 }
1818
1819 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
1820                                   SMU7_Discrete_VoltageLevel *voltage)
1821 {
1822         struct ci_power_info *pi = ci_get_pi(rdev);
1823         u32 i = 0;
1824
1825         if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
1826                 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
1827                         if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
1828                                 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
1829                                 break;
1830                         }
1831                 }
1832
1833                 if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
1834                         return -EINVAL;
1835         }
1836
1837         return -EINVAL;
1838 }
1839
1840 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1841                                          struct atom_voltage_table_entry *voltage_table,
1842                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1843 {
1844         u16 v_index, idx;
1845         bool voltage_found = false;
1846         *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1847         *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1848
1849         if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1850                 return -EINVAL;
1851
1852         if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1853                 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1854                         if (voltage_table->value ==
1855                             rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1856                                 voltage_found = true;
1857                                 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1858                                         idx = v_index;
1859                                 else
1860                                         idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1861                                 *std_voltage_lo_sidd =
1862                                         rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1863                                 *std_voltage_hi_sidd =
1864                                         rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1865                                 break;
1866                         }
1867                 }
1868
1869                 if (!voltage_found) {
1870                         for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1871                                 if (voltage_table->value <=
1872                                     rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1873                                         voltage_found = true;
1874                                         if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1875                                                 idx = v_index;
1876                                         else
1877                                                 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1878                                         *std_voltage_lo_sidd =
1879                                                 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1880                                         *std_voltage_hi_sidd =
1881                                                 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1882                                         break;
1883                                 }
1884                         }
1885                 }
1886         }
1887
1888         return 0;
1889 }
1890
1891 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1892                                                   const struct radeon_phase_shedding_limits_table *limits,
1893                                                   u32 sclk,
1894                                                   u32 *phase_shedding)
1895 {
1896         unsigned int i;
1897
1898         *phase_shedding = 1;
1899
1900         for (i = 0; i < limits->count; i++) {
1901                 if (sclk < limits->entries[i].sclk) {
1902                         *phase_shedding = i;
1903                         break;
1904                 }
1905         }
1906 }
1907
1908 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1909                                                   const struct radeon_phase_shedding_limits_table *limits,
1910                                                   u32 mclk,
1911                                                   u32 *phase_shedding)
1912 {
1913         unsigned int i;
1914
1915         *phase_shedding = 1;
1916
1917         for (i = 0; i < limits->count; i++) {
1918                 if (mclk < limits->entries[i].mclk) {
1919                         *phase_shedding = i;
1920                         break;
1921                 }
1922         }
1923 }
1924
1925 static int ci_init_arb_table_index(struct radeon_device *rdev)
1926 {
1927         struct ci_power_info *pi = ci_get_pi(rdev);
1928         u32 tmp;
1929         int ret;
1930
1931         ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1932                                      &tmp, pi->sram_end);
1933         if (ret)
1934                 return ret;
1935
1936         tmp &= 0x00FFFFFF;
1937         tmp |= MC_CG_ARB_FREQ_F1 << 24;
1938
1939         return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1940                                        tmp, pi->sram_end);
1941 }
1942
1943 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1944                                          struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1945                                          u32 clock, u32 *voltage)
1946 {
1947         u32 i = 0;
1948
1949         if (allowed_clock_voltage_table->count == 0)
1950                 return -EINVAL;
1951
1952         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1953                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1954                         *voltage = allowed_clock_voltage_table->entries[i].v;
1955                         return 0;
1956                 }
1957         }
1958
1959         *voltage = allowed_clock_voltage_table->entries[i-1].v;
1960
1961         return 0;
1962 }
1963
1964 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1965                                              u32 sclk, u32 min_sclk_in_sr)
1966 {
1967         u32 i;
1968         u32 tmp;
1969         u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
1970                 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
1971
1972         if (sclk < min)
1973                 return 0;
1974
1975         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
1976                 tmp = sclk / (1 << i);
1977                 if (tmp >= min || i == 0)
1978                         break;
1979         }
1980
1981         return (u8)i;
1982 }
1983
/* Copy the F0 MC ARB register set to F1 and switch the MC over to F1. */
static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
1988
1989 static int ci_reset_to_default(struct radeon_device *rdev)
1990 {
1991         return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
1992                 0 : -EINVAL;
1993 }
1994
1995 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
1996 {
1997         u32 tmp;
1998
1999         tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2000
2001         if (tmp == MC_CG_ARB_FREQ_F0)
2002                 return 0;
2003
2004         return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2005 }
2006
2007 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2008                                                 u32 sclk,
2009                                                 u32 mclk,
2010                                                 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2011 {
2012         u32 dram_timing;
2013         u32 dram_timing2;
2014         u32 burst_time;
2015
2016         radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2017
2018         dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2019         dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2020         burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2021
2022         arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2023         arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2024         arb_regs->McArbBurstTime = (u8)burst_time;
2025
2026         return 0;
2027 }
2028
2029 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2030 {
2031         struct ci_power_info *pi = ci_get_pi(rdev);
2032         SMU7_Discrete_MCArbDramTimingTable arb_regs;
2033         u32 i, j;
2034         int ret =  0;
2035
2036         memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2037
2038         for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2039                 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2040                         ret = ci_populate_memory_timing_parameters(rdev,
2041                                                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2042                                                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2043                                                                    &arb_regs.entries[i][j]);
2044                         if (ret)
2045                                 break;
2046                 }
2047         }
2048
2049         if (ret == 0)
2050                 ret = ci_copy_bytes_to_smc(rdev,
2051                                            pi->arb_table_start,
2052                                            (u8 *)&arb_regs,
2053                                            sizeof(SMU7_Discrete_MCArbDramTimingTable),
2054                                            pi->sram_end);
2055
2056         return ret;
2057 }
2058
2059 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2060 {
2061         struct ci_power_info *pi = ci_get_pi(rdev);
2062
2063         if (pi->need_update_smu7_dpm_table == 0)
2064                 return 0;
2065
2066         return ci_do_program_memory_timing_parameters(rdev);
2067 }
2068
2069 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2070                                           struct radeon_ps *radeon_boot_state)
2071 {
2072         struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2073         struct ci_power_info *pi = ci_get_pi(rdev);
2074         u32 level = 0;
2075
2076         for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2077                 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2078                     boot_state->performance_levels[0].sclk) {
2079                         pi->smc_state_table.GraphicsBootLevel = level;
2080                         break;
2081                 }
2082         }
2083
2084         for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2085                 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2086                     boot_state->performance_levels[0].mclk) {
2087                         pi->smc_state_table.MemoryBootLevel = level;
2088                         break;
2089                 }
2090         }
2091 }
2092
2093 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2094 {
2095         u32 i;
2096         u32 mask_value = 0;
2097
2098         for (i = dpm_table->count; i > 0; i--) {
2099                 mask_value = mask_value << 1;
2100                 if (dpm_table->dpm_levels[i-1].enabled)
2101                         mask_value |= 0x1;
2102                 else
2103                         mask_value &= 0xFFFFFFFE;
2104         }
2105
2106         return mask_value;
2107 }
2108
2109 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2110                                        SMU7_Discrete_DpmTable *table)
2111 {
2112         struct ci_power_info *pi = ci_get_pi(rdev);
2113         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2114         u32 i;
2115
2116         for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2117                 table->LinkLevel[i].PcieGenSpeed =
2118                         (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2119                 table->LinkLevel[i].PcieLaneCount =
2120                         r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2121                 table->LinkLevel[i].EnabledForActivity = 1;
2122                 table->LinkLevel[i].DownT = cpu_to_be32(5);
2123                 table->LinkLevel[i].UpT = cpu_to_be32(30);
2124         }
2125
2126         pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2127         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2128                 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2129 }
2130
2131 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2132                                      SMU7_Discrete_DpmTable *table)
2133 {
2134         u32 count;
2135         struct atom_clock_dividers dividers;
2136         int ret = -EINVAL;
2137
2138         table->UvdLevelCount =
2139                 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2140
2141         for (count = 0; count < table->UvdLevelCount; count++) {
2142                 table->UvdLevel[count].VclkFrequency =
2143                         rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2144                 table->UvdLevel[count].DclkFrequency =
2145                         rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2146                 table->UvdLevel[count].MinVddc =
2147                         rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2148                 table->UvdLevel[count].MinVddcPhases = 1;
2149
2150                 ret = radeon_atom_get_clock_dividers(rdev,
2151                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2152                                                      table->UvdLevel[count].VclkFrequency, false, &dividers);
2153                 if (ret)
2154                         return ret;
2155
2156                 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2157
2158                 ret = radeon_atom_get_clock_dividers(rdev,
2159                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2160                                                      table->UvdLevel[count].DclkFrequency, false, &dividers);
2161                 if (ret)
2162                         return ret;
2163
2164                 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2165
2166                 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2167                 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2168                 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2169         }
2170
2171         return ret;
2172 }
2173
2174 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2175                                      SMU7_Discrete_DpmTable *table)
2176 {
2177         u32 count;
2178         struct atom_clock_dividers dividers;
2179         int ret = -EINVAL;
2180
2181         table->VceLevelCount =
2182                 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2183
2184         for (count = 0; count < table->VceLevelCount; count++) {
2185                 table->VceLevel[count].Frequency =
2186                         rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2187                 table->VceLevel[count].MinVoltage =
2188                         (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2189                 table->VceLevel[count].MinPhases = 1;
2190
2191                 ret = radeon_atom_get_clock_dividers(rdev,
2192                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2193                                                      table->VceLevel[count].Frequency, false, &dividers);
2194                 if (ret)
2195                         return ret;
2196
2197                 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2198
2199                 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2200                 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2201         }
2202
2203         return ret;
2204
2205 }
2206
2207 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2208                                      SMU7_Discrete_DpmTable *table)
2209 {
2210         u32 count;
2211         struct atom_clock_dividers dividers;
2212         int ret = -EINVAL;
2213
2214         table->AcpLevelCount = (u8)
2215                 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2216
2217         for (count = 0; count < table->AcpLevelCount; count++) {
2218                 table->AcpLevel[count].Frequency =
2219                         rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2220                 table->AcpLevel[count].MinVoltage =
2221                         rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2222                 table->AcpLevel[count].MinPhases = 1;
2223
2224                 ret = radeon_atom_get_clock_dividers(rdev,
2225                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2226                                                      table->AcpLevel[count].Frequency, false, &dividers);
2227                 if (ret)
2228                         return ret;
2229
2230                 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2231
2232                 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2233                 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2234         }
2235
2236         return ret;
2237 }
2238
2239 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2240                                       SMU7_Discrete_DpmTable *table)
2241 {
2242         u32 count;
2243         struct atom_clock_dividers dividers;
2244         int ret = -EINVAL;
2245
2246         table->SamuLevelCount =
2247                 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2248
2249         for (count = 0; count < table->SamuLevelCount; count++) {
2250                 table->SamuLevel[count].Frequency =
2251                         rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2252                 table->SamuLevel[count].MinVoltage =
2253                         rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2254                 table->SamuLevel[count].MinPhases = 1;
2255
2256                 ret = radeon_atom_get_clock_dividers(rdev,
2257                                                      COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2258                                                      table->SamuLevel[count].Frequency, false, &dividers);
2259                 if (ret)
2260                         return ret;
2261
2262                 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2263
2264                 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2265                 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2266         }
2267
2268         return ret;
2269 }
2270
/**
 * ci_calculate_mclk_params - derive MPLL register values for a memory clock
 * @rdev: radeon device
 * @memory_clock: target memory clock
 * @mclk: SMC memory level whose clock register fields are filled in
 * @strobe_mode: request strobe-mode dividers from the atom interpreter
 * @dll_state_on: whether the memory DLLs are powered for this level
 *
 * Starts from the MPLL register values cached in the power info at init
 * time, queries the atom interpreter for the MPLL dividers matching
 * @memory_clock, and patches the divider, spread-spectrum and DLL bits.
 * The results are stored in @mclk in CPU byte order; the caller is
 * responsible for byte-swapping them for the SMC.
 *
 * Returns 0 on success, or the error from the atom divider query.
 */
static int ci_calculate_mclk_params(struct radeon_device *rdev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	/* start from the register values captured during driver init */
	u32  dll_cntl = pi->clock_registers.dll_cntl;
	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	/* bandwidth control */
	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	/* feedback divider (integer + fractional parts) and VCO mode */
	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	/* post divider for the AD side */
	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	/* the DQ PLL is only programmed for GDDR5 */
	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	if (pi->caps_mclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = rdev->clock.mpll.reference_freq;

		/* nominal frequency: GDDR5 uses a 4x factor, others 2x */
		if (pi->mem_gddr5)
			freq_nom = memory_clock * 4;
		else
			freq_nom = memory_clock * 2;

		/* tmp = (freq_nom / ref)^2, used to scale the SS amplitude */
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	/* the MRDCK*_PDNB bits gate memory DLL power for this level */
	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	/* hand the computed register values back in CPU byte order */
	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
2357
/**
 * ci_populate_single_memory_level - fill one SMC memory dpm level
 * @rdev: radeon device
 * @memory_clock: memory clock for this level
 * @memory_level: SMC memory level to populate
 *
 * Looks up the minimum vddc/vddci/mvdd for @memory_clock in the voltage
 * dependency tables, decides stutter/strobe/EDC and DLL settings from
 * the thresholds in the power info and the MC fuse registers, computes
 * the MPLL parameters, and finally byte-swaps all multi-byte fields for
 * the big-endian SMC firmware.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ci_populate_single_memory_level(struct radeon_device *rdev,
					   u32 memory_clock,
					   SMU7_Discrete_MemoryLevel *memory_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;
	bool dll_state_on;

	/* minimum voltages for this clock, when dependency tables exist */
	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddc);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddci);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						    memory_clock, &memory_level->MinMvdd);
		if (ret)
			return ret;
	}

	memory_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_mclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      memory_clock,
						      &memory_level->MinVddcPhases);

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 1;
	memory_level->UpH = 0;
	memory_level->DownH = 100;
	memory_level->VoltageDownH = 0;
	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

	/* defaults; selectively enabled below */
	memory_level->StutterEnable = false;
	memory_level->StrobeEnable = false;
	memory_level->EdcReadEnable = false;
	memory_level->EdcWriteEnable = false;
	memory_level->RttEnable = false;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* mclk stutter only below the threshold, with UVD idle, display
	 * stutter enabled in hw, and at most two active crtcs */
	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    (pi->uvd_enabled == false) &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (rdev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = 1;

	if (pi->mem_gddr5) {
		memory_level->StrobeRatio =
			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
		/* EDC read/write only above their enable thresholds */
		if (pi->mclk_edc_enable_threshold &&
		    (memory_clock > pi->mclk_edc_enable_threshold))
			memory_level->EdcReadEnable = true;

		if (pi->mclk_edc_wr_enable_threshold &&
		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
			memory_level->EdcWriteEnable = true;

		/* derive the DLL state from MC fuse registers; which fuse
		 * applies depends on the strobe frequency ratio */
		if (memory_level->StrobeEnable) {
			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		/* non-GDDR5 (DDR3) path */
		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
	if (ret)
		return ret;

	/* convert voltages to SMC units and everything to big-endian */
	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

	return 0;
}
2472
/**
 * ci_populate_smc_acpi_level - set up the SMC ACPI (lowest power) levels
 * @rdev: radeon device
 * @table: SMC discrete DPM table to populate
 *
 * Fills table->ACPILevel (engine side) and table->MemoryACPILevel
 * (memory side) with the state used while in ACPI: SPLL powered down
 * and in reset, mux on bypass, memory DLLs reset and powered down, and
 * the ACPI (or minimum pp table) voltages.  All multi-byte fields are
 * converted to big-endian for the SMC firmware.
 *
 * Returns 0 on success, or the error from the atom divider query.
 */
static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	SMU7_Discrete_VoltageLevel voltage_level;
	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	int ret;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* use the ACPI vddc if the bios provided one, else the pp table min */
	if (pi->acpi_vddc)
		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

	/* run the engine at the bare reference clock while in ACPI */
	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     table->ACPILevel.SclkFrequency, false, &dividers);
	if (ret)
		return ret;

	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* power the SPLL down and hold it in reset */
	spll_func_cntl &= ~SPLL_PWRON;
	spll_func_cntl |= SPLL_RESET;

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* swap the engine-side fields for the big-endian SMC */
	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

	/* memory ACPI level shares the engine-side vddc (already be32) */
	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		if (pi->acpi_vddci)
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
	}

	/* mvdd for clock 0; fall back to 0 when the lookup fails */
	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd = 0;
	else
		table->MemoryACPILevel.MinMvdd =
			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

	/* reset and power down the memory DLLs while in ACPI */
	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
	table->MemoryACPILevel.MpllDqFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

	/* the ACPI memory level takes no part in throttling or activity */
	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	table->MemoryACPILevel.ActivityLevel =
		cpu_to_be16((u16)pi->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	table->MemoryACPILevel.StrobeEnable = false;
	table->MemoryACPILevel.EdcReadEnable = false;
	table->MemoryACPILevel.EdcWriteEnable = false;
	table->MemoryACPILevel.RttEnable = false;

	return 0;
}
2587
2588
2589 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2590 {
2591         struct ci_power_info *pi = ci_get_pi(rdev);
2592         struct ci_ulv_parm *ulv = &pi->ulv;
2593
2594         if (ulv->supported) {
2595                 if (enable)
2596                         return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2597                                 0 : -EINVAL;
2598                 else
2599                         return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2600                                 0 : -EINVAL;
2601         }
2602
2603         return 0;
2604 }
2605
2606 static int ci_populate_ulv_level(struct radeon_device *rdev,
2607                                  SMU7_Discrete_Ulv *state)
2608 {
2609         struct ci_power_info *pi = ci_get_pi(rdev);
2610         u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2611
2612         state->CcPwrDynRm = 0;
2613         state->CcPwrDynRm1 = 0;
2614
2615         if (ulv_voltage == 0) {
2616                 pi->ulv.supported = false;
2617                 return 0;
2618         }
2619
2620         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2621                 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2622                         state->VddcOffset = 0;
2623                 else
2624                         state->VddcOffset =
2625                                 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2626         } else {
2627                 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2628                         state->VddcOffsetVid = 0;
2629                 else
2630                         state->VddcOffsetVid = (u8)
2631                                 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2632                                  VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2633         }
2634         state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2635
2636         state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2637         state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2638         state->VddcOffset = cpu_to_be16(state->VddcOffset);
2639
2640         return 0;
2641 }
2642
/**
 * ci_calculate_sclk_params - derive SPLL register values for an engine clock
 * @rdev: radeon device
 * @engine_clock: target engine clock
 * @sclk: SMC graphics level whose clock register fields are filled in
 *
 * Queries the atom interpreter for the SPLL dividers matching
 * @engine_clock, programs the feedback divider (with dithering) into
 * the cached cg_spll register values, and, when engine spread spectrum
 * is supported and the bios provides SS info for the resulting VCO
 * frequency, computes the SS step and amount.  Results are stored in
 * @sclk in CPU byte order; the caller byte-swaps them for the SMC.
 *
 * Returns 0 on success, or the error from the atom divider query.
 */
static int ci_calculate_sclk_params(struct radeon_device *rdev,
				    u32 engine_clock,
				    SMU7_Discrete_GraphicsLevel *sclk)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	/* start from the register values captured during driver init */
	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;
	fbdiv = dividers.fb_div & 0x3FFFFFF;

	/* program the feedback divider and enable dithering */
	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->caps_sclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	/* hand back the computed values in CPU byte order */
	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (u8)dividers.post_divider;

	return 0;
}
2698
/**
 * ci_populate_single_graphic_level - fill one SMC graphics dpm level
 * @rdev: radeon device
 * @engine_clock: engine clock for this level
 * @sclk_activity_level_t: activity threshold for this level
 * @graphic_level: SMC graphics level to populate
 *
 * Computes the SPLL parameters for @engine_clock, looks up the minimum
 * vddc from the sclk dependency table, fills in the dpm policy fields,
 * and byte-swaps all multi-byte fields for the big-endian SMC firmware.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ci_populate_single_graphic_level(struct radeon_device *rdev,
					    u32 engine_clock,
					    u16 sclk_activity_level_t,
					    SMU7_Discrete_GraphicsLevel *graphic_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
	if (ret)
		return ret;

	ret = ci_get_dependency_volt_by_clk(rdev,
					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					    engine_clock, &graphic_level->MinVddc);
	if (ret)
		return ret;

	graphic_level->SclkFrequency = engine_clock;

	graphic_level->Flags =  0;
	graphic_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      engine_clock,
						      &graphic_level->MinVddcPhases);

	graphic_level->ActivityLevel = sclk_activity_level_t;

	graphic_level->CcPwrDynRm = 0;
	graphic_level->CcPwrDynRm1 = 0;
	graphic_level->EnabledForActivity = 1;
	graphic_level->EnabledForThrottle = 1;
	graphic_level->UpH = 0;
	graphic_level->DownH = 0;
	graphic_level->VoltageDownH = 0;
	graphic_level->PowerThrottle = 0;

	/* pick a deep sleep divider when sclk deep sleep is supported */
	if (pi->caps_sclk_ds)
		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
										   engine_clock,
										   CISLAND_MINIMUM_ENGINE_CLOCK);

	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* convert voltages to SMC units and everything to big-endian */
	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

	return 0;
}
2760
2761 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2762 {
2763         struct ci_power_info *pi = ci_get_pi(rdev);
2764         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2765         u32 level_array_address = pi->dpm_table_start +
2766                 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2767         u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2768                 SMU7_MAX_LEVELS_GRAPHICS;
2769         SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
2770         u32 i, ret;
2771
2772         memset(levels, 0, level_array_size);
2773
2774         for (i = 0; i < dpm_table->sclk_table.count; i++) {
2775                 ret = ci_populate_single_graphic_level(rdev,
2776                                                        dpm_table->sclk_table.dpm_levels[i].value,
2777                                                        (u16)pi->activity_target[i],
2778                                                        &pi->smc_state_table.GraphicsLevel[i]);
2779                 if (ret)
2780                         return ret;
2781                 if (i == (dpm_table->sclk_table.count - 1))
2782                         pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2783                                 PPSMC_DISPLAY_WATERMARK_HIGH;
2784         }
2785
2786         pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2787         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2788                 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2789
2790         ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2791                                    (u8 *)levels, level_array_size,
2792                                    pi->sram_end);
2793         if (ret)
2794                 return ret;
2795
2796         return 0;
2797 }
2798
/* Thin wrapper: the ULV SMC state consists of just the single ULV level. */
static int ci_populate_ulv_state(struct radeon_device *rdev,
				 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(rdev, ulv_level);
}
2804
2805 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2806 {
2807         struct ci_power_info *pi = ci_get_pi(rdev);
2808         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2809         u32 level_array_address = pi->dpm_table_start +
2810                 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2811         u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2812                 SMU7_MAX_LEVELS_MEMORY;
2813         SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
2814         u32 i, ret;
2815
2816         memset(levels, 0, level_array_size);
2817
2818         for (i = 0; i < dpm_table->mclk_table.count; i++) {
2819                 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2820                         return -EINVAL;
2821                 ret = ci_populate_single_memory_level(rdev,
2822                                                       dpm_table->mclk_table.dpm_levels[i].value,
2823                                                       &pi->smc_state_table.MemoryLevel[i]);
2824                 if (ret)
2825                         return ret;
2826         }
2827
2828         pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2829
2830         pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2831         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2832                 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2833
2834         pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2835                 PPSMC_DISPLAY_WATERMARK_HIGH;
2836
2837         ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2838                                    (u8 *)levels, level_array_size,
2839                                    pi->sram_end);
2840         if (ret)
2841                 return ret;
2842
2843         return 0;
2844 }
2845
2846 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2847                                       struct ci_single_dpm_table* dpm_table,
2848                                       u32 count)
2849 {
2850         u32 i;
2851
2852         dpm_table->count = count;
2853         for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2854                 dpm_table->dpm_levels[i].enabled = false;
2855 }
2856
2857 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
2858                                       u32 index, u32 pcie_gen, u32 pcie_lanes)
2859 {
2860         dpm_table->dpm_levels[index].value = pcie_gen;
2861         dpm_table->dpm_levels[index].param1 = pcie_lanes;
2862         dpm_table->dpm_levels[index].enabled = true;
2863 }
2864
2865 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2866 {
2867         struct ci_power_info *pi = ci_get_pi(rdev);
2868
2869         if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2870                 return -EINVAL;
2871
2872         if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2873                 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2874                 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2875         } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2876                 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2877                 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2878         }
2879
2880         ci_reset_single_dpm_table(rdev,
2881                                   &pi->dpm_table.pcie_speed_table,
2882                                   SMU7_MAX_LEVELS_LINK);
2883
2884         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2885                                   pi->pcie_gen_powersaving.min,
2886                                   pi->pcie_lane_powersaving.min);
2887         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2888                                   pi->pcie_gen_performance.min,
2889                                   pi->pcie_lane_performance.min);
2890         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
2891                                   pi->pcie_gen_powersaving.min,
2892                                   pi->pcie_lane_powersaving.max);
2893         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
2894                                   pi->pcie_gen_performance.min,
2895                                   pi->pcie_lane_performance.max);
2896         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
2897                                   pi->pcie_gen_powersaving.max,
2898                                   pi->pcie_lane_powersaving.max);
2899         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
2900                                   pi->pcie_gen_performance.max,
2901                                   pi->pcie_lane_performance.max);
2902
2903         pi->dpm_table.pcie_speed_table.count = 6;
2904
2905         return 0;
2906 }
2907
/* Build the driver-side default dpm tables (sclk, mclk, vddc, vddci,
 * mvdd, pcie) from the atom/powerplay dependency tables.  Duplicate
 * consecutive clocks are collapsed so each table level is unique.
 * Returns -EINVAL if the required sclk/mclk dependency tables are
 * missing or empty.
 */
static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
        struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
        struct radeon_cac_leakage_table *std_voltage_table =
                &rdev->pm.dpm.dyn_state.cac_leakage_table;
        u32 i;

        if (allowed_sclk_vddc_table == NULL)
                return -EINVAL;
        if (allowed_sclk_vddc_table->count < 1)
                return -EINVAL;
        if (allowed_mclk_table == NULL)
                return -EINVAL;
        if (allowed_mclk_table->count < 1)
                return -EINVAL;

        memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));

        /* start with all tables empty/disabled */
        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.sclk_table,
                                  SMU7_MAX_LEVELS_GRAPHICS);
        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.mclk_table,
                                  SMU7_MAX_LEVELS_MEMORY);
        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.vddc_table,
                                  SMU7_MAX_LEVELS_VDDC);
        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.vddci_table,
                                  SMU7_MAX_LEVELS_VDDCI);
        ci_reset_single_dpm_table(rdev,
                                  &pi->dpm_table.mvdd_table,
                                  SMU7_MAX_LEVELS_MVDD);

        /* sclk levels: copy clocks, skipping entries equal to the
         * previous one (dependency tables may repeat a clock) */
        pi->dpm_table.sclk_table.count = 0;
        for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
                if ((i == 0) ||
                    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
                     allowed_sclk_vddc_table->entries[i].clk)) {
                        pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
                                allowed_sclk_vddc_table->entries[i].clk;
                        pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
                        pi->dpm_table.sclk_table.count++;
                }
        }

        /* mclk levels: same dedup scheme as sclk above */
        pi->dpm_table.mclk_table.count = 0;
        for (i = 0; i < allowed_mclk_table->count; i++) {
                if ((i==0) ||
                    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
                     allowed_mclk_table->entries[i].clk)) {
                        pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
                                allowed_mclk_table->entries[i].clk;
                        pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
                        pi->dpm_table.mclk_table.count++;
                }
        }

        /* vddc levels: voltage plus leakage from the CAC table.
         * NOTE(review): std_voltage_table is indexed by the same i as
         * the sclk table — assumes both tables are at least the same
         * length; not validated here. */
        for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
                pi->dpm_table.vddc_table.dpm_levels[i].value =
                        allowed_sclk_vddc_table->entries[i].v;
                pi->dpm_table.vddc_table.dpm_levels[i].param1 =
                        std_voltage_table->entries[i].leakage;
                pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
        }
        pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;

        /* vddci levels (pointer check is always true for a member
         * address; kept as-is) */
        allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
        if (allowed_mclk_table) {
                for (i = 0; i < allowed_mclk_table->count; i++) {
                        pi->dpm_table.vddci_table.dpm_levels[i].value =
                                allowed_mclk_table->entries[i].v;
                        pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
                }
                pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
        }

        /* mvdd levels */
        allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
        if (allowed_mclk_table) {
                for (i = 0; i < allowed_mclk_table->count; i++) {
                        pi->dpm_table.mvdd_table.dpm_levels[i].value =
                                allowed_mclk_table->entries[i].v;
                        pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
                }
                pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
        }

        ci_setup_default_pcie_tables(rdev);

        return 0;
}
3003
3004 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3005                               u32 value, u32 *boot_level)
3006 {
3007         u32 i;
3008         int ret = -EINVAL;
3009
3010         for(i = 0; i < table->count; i++) {
3011                 if (value == table->dpm_levels[i].value) {
3012                         *boot_level = i;
3013                         ret = 0;
3014                 }
3015         }
3016
3017         return ret;
3018 }
3019
/* Build the SMU7 discrete dpm state table and upload it to SMC RAM.
 * Populates the default dpm tables, all per-domain levels (graphics,
 * memory, link, acpi, vce, acp, samu, uvd), boot levels/voltages and
 * global control fields, byte-swaps the multi-byte fields to the SMC's
 * big-endian layout, then copies the table into SMC address space.
 * Returns 0 on success or a negative error code from any populate or
 * copy step.
 */
static int ci_init_smc_table(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct ci_ulv_parm *ulv = &pi->ulv;
        struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
        SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
        int ret;

        ret = ci_setup_default_dpm_tables(rdev);
        if (ret)
                return ret;

        if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
                ci_populate_smc_voltage_tables(rdev, table);

        ci_init_fps_limits(rdev);

        /* platform capability flags */
        if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

        if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

        if (pi->mem_gddr5)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

        if (ulv->supported) {
                ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
                if (ret)
                        return ret;
                WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
        }

        ret = ci_populate_all_graphic_levels(rdev);
        if (ret)
                return ret;

        ret = ci_populate_all_memory_levels(rdev);
        if (ret)
                return ret;

        ci_populate_smc_link_level(rdev, table);

        ret = ci_populate_smc_acpi_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_vce_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_acp_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_samu_level(rdev, table);
        if (ret)
                return ret;

        ret = ci_do_program_memory_timing_parameters(rdev);
        if (ret)
                return ret;

        ret = ci_populate_smc_uvd_level(rdev, table);
        if (ret)
                return ret;

        table->UvdBootLevel  = 0;
        table->VceBootLevel  = 0;
        table->AcpBootLevel  = 0;
        table->SamuBootLevel  = 0;
        table->GraphicsBootLevel  = 0;
        table->MemoryBootLevel  = 0;

        /* NOTE(review): the return values of these two lookups are
         * assigned but never checked; on failure the boot levels simply
         * stay 0 (set just above).  Presumably intentional fallback —
         * confirm before changing. */
        ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
                                 pi->vbios_boot_state.sclk_bootup_value,
                                 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

        ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
                                 pi->vbios_boot_state.mclk_bootup_value,
                                 (u32 *)&pi->smc_state_table.MemoryBootLevel);

        table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
        table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
        table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

        ci_populate_smc_initial_state(rdev, radeon_boot_state);

        ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
        if (ret)
                return ret;

        /* global dpm control fields */
        table->UVDInterval = 1;
        table->VCEInterval = 1;
        table->ACPInterval = 1;
        table->SAMUInterval = 1;
        table->GraphicsVoltageChangeEnable = 1;
        table->GraphicsThermThrottleEnable = 1;
        table->GraphicsInterval = 1;
        table->VoltageInterval = 1;
        table->ThermalInterval = 1;
        /* temperature limits are handed to the SMC in Q8.8 degrees */
        table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
                                             CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
        table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
                                            CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
        table->MemoryVoltageChangeEnable = 1;
        table->MemoryInterval = 1;
        table->VoltageResponseTime = 0;
        table->VddcVddciDelta = 4000;
        table->PhaseResponseTime = 0;
        table->MemoryThermThrottleEnable = 1;
        table->PCIeBootLinkLevel = 0;
        table->PCIeGenInterval = 1;
        if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
                table->SVI2Enable  = 1;
        else
                table->SVI2Enable  = 0;

        table->ThermGpio = 17;
        table->SclkStepSize = 0x4000;

        /* swap multi-byte fields to the big-endian layout the SMC expects */
        table->SystemFlags = cpu_to_be32(table->SystemFlags);
        table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
        table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
        table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
        table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
        table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
        table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
        table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
        table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
        table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
        table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
        table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
        table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
        table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

        /* upload everything from SystemFlags onward, minus the trailing
         * PID controller blocks which are not written here */
        ret = ci_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
                                   (u8 *)&table->SystemFlags,
                                   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
                                   pi->sram_end);
        if (ret)
                return ret;

        return 0;
}
3167
3168 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3169                                       struct ci_single_dpm_table *dpm_table,
3170                                       u32 low_limit, u32 high_limit)
3171 {
3172         u32 i;
3173
3174         for (i = 0; i < dpm_table->count; i++) {
3175                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3176                     (dpm_table->dpm_levels[i].value > high_limit))
3177                         dpm_table->dpm_levels[i].enabled = false;
3178                 else
3179                         dpm_table->dpm_levels[i].enabled = true;
3180         }
3181 }
3182
3183 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3184                                     u32 speed_low, u32 lanes_low,
3185                                     u32 speed_high, u32 lanes_high)
3186 {
3187         struct ci_power_info *pi = ci_get_pi(rdev);
3188         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3189         u32 i, j;
3190
3191         for (i = 0; i < pcie_table->count; i++) {
3192                 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3193                     (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3194                     (pcie_table->dpm_levels[i].value > speed_high) ||
3195                     (pcie_table->dpm_levels[i].param1 > lanes_high))
3196                         pcie_table->dpm_levels[i].enabled = false;
3197                 else
3198                         pcie_table->dpm_levels[i].enabled = true;
3199         }
3200
3201         for (i = 0; i < pcie_table->count; i++) {
3202                 if (pcie_table->dpm_levels[i].enabled) {
3203                         for (j = i + 1; j < pcie_table->count; j++) {
3204                                 if (pcie_table->dpm_levels[j].enabled) {
3205                                         if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3206                                             (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3207                                                 pcie_table->dpm_levels[j].enabled = false;
3208                                 }
3209                         }
3210                 }
3211         }
3212 }
3213
3214 static int ci_trim_dpm_states(struct radeon_device *rdev,
3215                               struct radeon_ps *radeon_state)
3216 {
3217         struct ci_ps *state = ci_get_ps(radeon_state);
3218         struct ci_power_info *pi = ci_get_pi(rdev);
3219         u32 high_limit_count;
3220
3221         if (state->performance_level_count < 1)
3222                 return -EINVAL;
3223
3224         if (state->performance_level_count == 1)
3225                 high_limit_count = 0;
3226         else
3227                 high_limit_count = 1;
3228
3229         ci_trim_single_dpm_states(rdev,
3230                                   &pi->dpm_table.sclk_table,
3231                                   state->performance_levels[0].sclk,
3232                                   state->performance_levels[high_limit_count].sclk);
3233
3234         ci_trim_single_dpm_states(rdev,
3235                                   &pi->dpm_table.mclk_table,
3236                                   state->performance_levels[0].mclk,
3237                                   state->performance_levels[high_limit_count].mclk);
3238
3239         ci_trim_pcie_dpm_states(rdev,
3240                                 state->performance_levels[0].pcie_gen,
3241                                 state->performance_levels[0].pcie_lane,
3242                                 state->performance_levels[high_limit_count].pcie_gen,
3243                                 state->performance_levels[high_limit_count].pcie_lane);
3244
3245         return 0;
3246 }
3247
3248 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3249 {
3250         struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3251                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3252         struct radeon_clock_voltage_dependency_table *vddc_table =
3253                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3254         u32 requested_voltage = 0;
3255         u32 i;
3256
3257         if (disp_voltage_table == NULL)
3258                 return -EINVAL;
3259         if (!disp_voltage_table->count)
3260                 return -EINVAL;
3261
3262         for (i = 0; i < disp_voltage_table->count; i++) {
3263                 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3264                         requested_voltage = disp_voltage_table->entries[i].v;
3265         }
3266
3267         for (i = 0; i < vddc_table->count; i++) {
3268                 if (requested_voltage <= vddc_table->entries[i].v) {
3269                         requested_voltage = vddc_table->entries[i].v;
3270                         return (ci_send_msg_to_smc_with_parameter(rdev,
3271                                                                   PPSMC_MSG_VddC_Request,
3272                                                                   requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3273                                 0 : -EINVAL;
3274                 }
3275         }
3276
3277         return -EINVAL;
3278 }
3279
3280 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3281 {
3282         struct ci_power_info *pi = ci_get_pi(rdev);
3283         PPSMC_Result result;
3284
3285         if (!pi->sclk_dpm_key_disabled) {
3286                 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3287                         result = ci_send_msg_to_smc_with_parameter(rdev,
3288                                                                    PPSMC_MSG_SCLKDPM_SetEnabledMask,
3289                                                                    pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3290                         if (result != PPSMC_Result_OK)
3291                                 return -EINVAL;
3292                 }
3293         }
3294
3295         if (!pi->mclk_dpm_key_disabled) {
3296                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3297                         result = ci_send_msg_to_smc_with_parameter(rdev,
3298                                                                    PPSMC_MSG_MCLKDPM_SetEnabledMask,
3299                                                                    pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3300                         if (result != PPSMC_Result_OK)
3301                                 return -EINVAL;
3302                 }
3303         }
3304
3305         if (!pi->pcie_dpm_key_disabled) {
3306                 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3307                         result = ci_send_msg_to_smc_with_parameter(rdev,
3308                                                                    PPSMC_MSG_PCIeDPM_SetEnabledMask,
3309                                                                    pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3310                         if (result != PPSMC_Result_OK)
3311                                 return -EINVAL;
3312                 }
3313         }
3314
3315         ci_apply_disp_minimum_voltage_request(rdev);
3316
3317         return 0;
3318 }
3319
/* Decide which parts of the SMC dpm tables must be re-uploaded for the
 * requested state, accumulating flags into
 * pi->need_update_smu7_dpm_table: OD_UPDATE_* when the state's top
 * sclk/mclk is not present in the current table (overdrive), UPDATE_*
 * for display-driven refreshes.
 */
static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
                                                   struct radeon_ps *radeon_state)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct ci_ps *state = ci_get_ps(radeon_state);
        struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
        u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
        struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
        u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
        u32 i;

        pi->need_update_smu7_dpm_table = 0;

        /* is the requested top sclk already a table level? */
        for (i = 0; i < sclk_table->count; i++) {
                if (sclk == sclk_table->dpm_levels[i].value)
                        break;
        }

        if (i >= sclk_table->count) {
                pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
        } else {
                /* XXX check display min clock requirements */
                if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
                        pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
        }

        /* is the requested top mclk already a table level? */
        for (i = 0; i < mclk_table->count; i++) {
                if (mclk == mclk_table->dpm_levels[i].value)
                        break;
        }

        if (i >= mclk_table->count)
                pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

        /* a change in active crtc count affects memory watermarks */
        if (rdev->pm.dpm.current_active_crtc_count !=
            rdev->pm.dpm.new_active_crtc_count)
                pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}
3358
/* Re-populate and upload the graphics/memory level tables when
 * ci_find_dpm_states_clocks_in_dpm_table flagged them dirty.  For
 * overdrive (OD_UPDATE_*) the top table level is first overwritten with
 * the state's requested clock.  No-op when no flags are set.
 */
static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
                                                       struct radeon_ps *radeon_state)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct ci_ps *state = ci_get_ps(radeon_state);
        u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
        u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
        struct ci_dpm_table *dpm_table = &pi->dpm_table;
        int ret;

        if (!pi->need_update_smu7_dpm_table)
                return 0;

        if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
                dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;

        if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
                dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;

        if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
                ret = ci_populate_all_graphic_levels(rdev);
                if (ret)
                        return ret;
        }

        if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
                ret = ci_populate_all_memory_levels(rdev);
                if (ret)
                        return ret;
        }

        return 0;
}
3392
/* Enable or disable UVD dpm in the SMC.  On enable, builds the UVD
 * level mask from the highest levels whose voltage fits under the
 * current (AC or DC) vddc limit and sends it, then adjusts mclk dpm
 * level 0 around UVD activity.  Returns 0 if the final
 * UVDDPM_Enable/Disable message is accepted, -EINVAL otherwise.
 */
static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct radeon_clock_and_voltage_limits *max_limits;
        int i;

        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (enable) {
                pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

                /* walk from the highest level down; without per-level
                 * UVD dpm support only the single top valid level is
                 * kept (loop breaks after the first hit) */
                for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
                        if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                                pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

                                if (!pi->caps_uvd_dpm)
                                        break;
                        }
                }

                ci_send_msg_to_smc_with_parameter(rdev,
                                                  PPSMC_MSG_UVDDPM_SetEnabledMask,
                                                  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

                /* presumably mclk level 0 is too slow for UVD: drop it
                 * from the mclk mask while UVD runs — confirm against
                 * SMC firmware docs */
                if (pi->last_mclk_dpm_enable_mask & 0x1) {
                        pi->uvd_enabled = true;
                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
                        ci_send_msg_to_smc_with_parameter(rdev,
                                                          PPSMC_MSG_MCLKDPM_SetEnabledMask,
                                                          pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
                }
        } else {
                /* restore mclk level 0 once UVD is off */
                if (pi->last_mclk_dpm_enable_mask & 0x1) {
                        pi->uvd_enabled = false;
                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
                        ci_send_msg_to_smc_with_parameter(rdev,
                                                          PPSMC_MSG_MCLKDPM_SetEnabledMask,
                                                          pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
                }
        }

        return (ci_send_msg_to_smc(rdev, enable ?
                                   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
                0 : -EINVAL;
}
3441
3442 #if 0
/* Enable or disable VCE dpm in the SMC (currently compiled out).
 * Mirrors ci_enable_uvd_dpm: on enable, builds the VCE level mask from
 * the highest levels fitting under the AC/DC vddc limit and sends it.
 * Returns 0 if the VCEDPM_Enable/Disable message is accepted.
 */
static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct radeon_clock_and_voltage_limits *max_limits;
        int i;

        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (enable) {
                pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
                /* only one level is kept unless per-level VCE dpm is supported */
                for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
                        if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                                pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;

                                if (!pi->caps_vce_dpm)
                                        break;
                        }
                }

                ci_send_msg_to_smc_with_parameter(rdev,
                                                  PPSMC_MSG_VCEDPM_SetEnabledMask,
                                                  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
        }

        return (ci_send_msg_to_smc(rdev, enable ?
                                   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
                0 : -EINVAL;
}
3474
/* Enable or disable SAMU dpm in the SMC (currently compiled out).
 * Same mask-building scheme as ci_enable_vce_dpm, using the SAMU
 * clock/voltage dependency table.
 */
static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct radeon_clock_and_voltage_limits *max_limits;
        int i;

        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (enable) {
                pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
                for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
                        if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                                pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

                                if (!pi->caps_samu_dpm)
                                        break;
                        }
                }

                ci_send_msg_to_smc_with_parameter(rdev,
                                                  PPSMC_MSG_SAMUDPM_SetEnabledMask,
                                                  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
        }
        return (ci_send_msg_to_smc(rdev, enable ?
                                   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
                0 : -EINVAL;
}
3505
/* Enable or disable ACP dpm in the SMC (currently compiled out).
 * Same mask-building scheme as ci_enable_vce_dpm, using the ACP
 * clock/voltage dependency table.
 */
static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        const struct radeon_clock_and_voltage_limits *max_limits;
        int i;

        if (rdev->pm.dpm.ac_power)
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (enable) {
                pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
                for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
                        if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
                                pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

                                if (!pi->caps_acp_dpm)
                                        break;
                        }
                }

                ci_send_msg_to_smc_with_parameter(rdev,
                                                  PPSMC_MSG_ACPDPM_SetEnabledMask,
                                                  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
        }

        return (ci_send_msg_to_smc(rdev, enable ?
                                   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
                0 : -EINVAL;
}
3537 #endif
3538
/* Gate/ungate UVD dpm.  When ungating, pick the UVD boot level (the
 * top dependency-table entry unless per-level UVD dpm is supported, in
 * which case level 0) and program it into the SMC-visible dpm table
 * register, then enable UVD dpm.  Returns ci_enable_uvd_dpm's result.
 */
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        u32 tmp;

        if (!gate) {
                if (pi->caps_uvd_dpm ||
                    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
                        pi->smc_state_table.UvdBootLevel = 0;
                else
                        pi->smc_state_table.UvdBootLevel =
                                rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;

                /* read-modify-write the boot level field only */
                tmp = RREG32_SMC(DPM_TABLE_475);
                tmp &= ~UvdBootLevel_MASK;
                tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
                WREG32_SMC(DPM_TABLE_475, tmp);
        }

        return ci_enable_uvd_dpm(rdev, !gate);
}
3560
3561 #if 0
3562 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3563 {
3564         u8 i;
3565         u32 min_evclk = 30000; /* ??? */
3566         struct radeon_vce_clock_voltage_dependency_table *table =
3567                 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3568
3569         for (i = 0; i < table->count; i++) {
3570                 if (table->entries[i].evclk >= min_evclk)
3571                         return i;
3572         }
3573
3574         return table->count - 1;
3575 }
3576
/* Enable or disable VCE DPM when the new power state starts or stops
 * using the VCE engine clock.  No-op when the evclk on/off status does
 * not change between states.
 */
static int ci_update_vce_dpm(struct radeon_device *rdev,
                             struct radeon_ps *radeon_new_state,
                             struct radeon_ps *radeon_current_state)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
        bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
        int ret = 0;
        u32 tmp;

        if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
                if (new_vce_clock_non_zero) {
                        /* Program the VCE boot level into the SMC state
                         * table before turning VCE DPM on. */
                        pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);

                        tmp = RREG32_SMC(DPM_TABLE_475);
                        tmp &= ~VceBootLevel_MASK;
                        tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
                        WREG32_SMC(DPM_TABLE_475, tmp);

                        ret = ci_enable_vce_dpm(rdev, true);
                } else {
                        ret = ci_enable_vce_dpm(rdev, false);
                }
        }
        return ret;
}
3603
/* SAMU has no boot-level bookkeeping; gating maps directly onto
 * enabling/disabling SAMU DPM in the SMC. */
static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
        return ci_enable_samu_dpm(rdev, gate);
}
3608
/* Gate or ungate ACP DPM.  When ungating, program boot level 0 into the
 * SMC state table first, then enable ACP DPM.
 */
static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        u32 tmp;

        if (!gate) {
                pi->smc_state_table.AcpBootLevel = 0;

                /* Read-modify-write only the AcpBootLevel field. */
                tmp = RREG32_SMC(DPM_TABLE_475);
                tmp &= ~AcpBootLevel_MASK;
                tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
                WREG32_SMC(DPM_TABLE_475, tmp);
        }

        return ci_enable_acp_dpm(rdev, !gate);
}
3625 #endif
3626
3627 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3628                                              struct radeon_ps *radeon_state)
3629 {
3630         struct ci_power_info *pi = ci_get_pi(rdev);
3631         int ret;
3632
3633         ret = ci_trim_dpm_states(rdev, radeon_state);
3634         if (ret)
3635                 return ret;
3636
3637         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3638                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3639         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3640                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3641         pi->last_mclk_dpm_enable_mask =
3642                 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3643         if (pi->uvd_enabled) {
3644                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3645                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3646         }
3647         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3648                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3649
3650         return 0;
3651 }
3652
3653 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3654                                        u32 level_mask)
3655 {
3656         u32 level = 0;
3657
3658         while ((level_mask & (1 << level)) == 0)
3659                 level++;
3660
3661         return level;
3662 }
3663
3664
3665 int ci_dpm_force_performance_level(struct radeon_device *rdev,
3666                                    enum radeon_dpm_forced_level level)
3667 {
3668         struct ci_power_info *pi = ci_get_pi(rdev);
3669         PPSMC_Result smc_result;
3670         u32 tmp, levels, i;
3671         int ret;
3672
3673         if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3674                 if ((!pi->sclk_dpm_key_disabled) &&
3675                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3676                         levels = 0;
3677                         tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3678                         while (tmp >>= 1)
3679                                 levels++;
3680                         if (levels) {
3681                                 ret = ci_dpm_force_state_sclk(rdev, levels);
3682                                 if (ret)
3683                                         return ret;
3684                                 for (i = 0; i < rdev->usec_timeout; i++) {
3685                                         tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3686                                                CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3687                                         if (tmp == levels)
3688                                                 break;
3689                                         udelay(1);
3690                                 }
3691                         }
3692                 }
3693                 if ((!pi->mclk_dpm_key_disabled) &&
3694                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3695                         levels = 0;
3696                         tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3697                         while (tmp >>= 1)
3698                                 levels++;
3699                         if (levels) {
3700                                 ret = ci_dpm_force_state_mclk(rdev, levels);
3701                                 if (ret)
3702                                         return ret;
3703                                 for (i = 0; i < rdev->usec_timeout; i++) {
3704                                         tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3705                                                CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3706                                         if (tmp == levels)
3707                                                 break;
3708                                         udelay(1);
3709                                 }
3710                         }
3711                 }
3712                 if ((!pi->pcie_dpm_key_disabled) &&
3713                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3714                         levels = 0;
3715                         tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3716                         while (tmp >>= 1)
3717                                 levels++;
3718                         if (levels) {
3719                                 ret = ci_dpm_force_state_pcie(rdev, level);
3720                                 if (ret)
3721                                         return ret;
3722                                 for (i = 0; i < rdev->usec_timeout; i++) {
3723                                         tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3724                                                CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3725                                         if (tmp == levels)
3726                                                 break;
3727                                         udelay(1);
3728                                 }
3729                         }
3730                 }
3731         } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3732                 if ((!pi->sclk_dpm_key_disabled) &&
3733                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3734                         levels = ci_get_lowest_enabled_level(rdev,
3735                                                              pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3736                         ret = ci_dpm_force_state_sclk(rdev, levels);
3737                         if (ret)
3738                                 return ret;
3739                         for (i = 0; i < rdev->usec_timeout; i++) {
3740                                 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3741                                        CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3742                                 if (tmp == levels)
3743                                         break;
3744                                 udelay(1);
3745                         }
3746                 }
3747                 if ((!pi->mclk_dpm_key_disabled) &&
3748                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3749                         levels = ci_get_lowest_enabled_level(rdev,
3750                                                              pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3751                         ret = ci_dpm_force_state_mclk(rdev, levels);
3752                         if (ret)
3753                                 return ret;
3754                         for (i = 0; i < rdev->usec_timeout; i++) {
3755                                 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3756                                        CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3757                                 if (tmp == levels)
3758                                         break;
3759                                 udelay(1);
3760                         }
3761                 }
3762                 if ((!pi->pcie_dpm_key_disabled) &&
3763                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3764                         levels = ci_get_lowest_enabled_level(rdev,
3765                                                              pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3766                         ret = ci_dpm_force_state_pcie(rdev, levels);
3767                         if (ret)
3768                                 return ret;
3769                         for (i = 0; i < rdev->usec_timeout; i++) {
3770                                 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3771                                        CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3772                                 if (tmp == levels)
3773                                         break;
3774                                 udelay(1);
3775                         }
3776                 }
3777         } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3778                 if (!pi->sclk_dpm_key_disabled) {
3779                         smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3780                         if (smc_result != PPSMC_Result_OK)
3781                                 return -EINVAL;
3782                 }
3783                 if (!pi->mclk_dpm_key_disabled) {
3784                         smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3785                         if (smc_result != PPSMC_Result_OK)
3786                                 return -EINVAL;
3787                 }
3788                 if (!pi->pcie_dpm_key_disabled) {
3789                         smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
3790                         if (smc_result != PPSMC_Result_OK)
3791                                 return -EINVAL;
3792                 }
3793         }
3794
3795         rdev->pm.dpm.forced_level = level;
3796
3797         return 0;
3798 }
3799
3800 static int ci_set_mc_special_registers(struct radeon_device *rdev,
3801                                        struct ci_mc_reg_table *table)
3802 {
3803         struct ci_power_info *pi = ci_get_pi(rdev);
3804         u8 i, j, k;
3805         u32 temp_reg;
3806
3807         for (i = 0, j = table->last; i < table->last; i++) {
3808                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3809                         return -EINVAL;
3810                 switch(table->mc_reg_address[i].s1 << 2) {
3811                 case MC_SEQ_MISC1:
3812                         temp_reg = RREG32(MC_PMG_CMD_EMRS);
3813                         table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3814                         table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3815                         for (k = 0; k < table->num_entries; k++) {
3816                                 table->mc_reg_table_entry[k].mc_data[j] =
3817                                         ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3818                         }
3819                         j++;
3820                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3821                                 return -EINVAL;
3822
3823                         temp_reg = RREG32(MC_PMG_CMD_MRS);
3824                         table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3825                         table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3826                         for (k = 0; k < table->num_entries; k++) {
3827                                 table->mc_reg_table_entry[k].mc_data[j] =
3828                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3829                                 if (!pi->mem_gddr5)
3830                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3831                         }
3832                         j++;
3833                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3834                                 return -EINVAL;
3835
3836                         if (!pi->mem_gddr5) {
3837                                 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3838                                 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3839                                 for (k = 0; k < table->num_entries; k++) {
3840                                         table->mc_reg_table_entry[k].mc_data[j] =
3841                                                 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3842                                 }
3843                                 j++;
3844                                 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3845                                         return -EINVAL;
3846                         }
3847                         break;
3848                 case MC_SEQ_RESERVE_M:
3849                         temp_reg = RREG32(MC_PMG_CMD_MRS1);
3850                         table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3851                         table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3852                         for (k = 0; k < table->num_entries; k++) {
3853                                 table->mc_reg_table_entry[k].mc_data[j] =
3854                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3855                         }
3856                         j++;
3857                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3858                                 return -EINVAL;
3859                         break;
3860                 default:
3861                         break;
3862                 }
3863
3864         }
3865
3866         table->last = j;
3867
3868         return 0;
3869 }
3870
3871 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3872 {
3873         bool result = true;
3874
3875         switch(in_reg) {
3876         case MC_SEQ_RAS_TIMING >> 2:
3877                 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3878                 break;
3879         case MC_SEQ_DLL_STBY >> 2:
3880                 *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3881                 break;
3882         case MC_SEQ_G5PDX_CMD0 >> 2:
3883                 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3884                 break;
3885         case MC_SEQ_G5PDX_CMD1 >> 2:
3886                 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3887                 break;
3888         case MC_SEQ_G5PDX_CTRL >> 2:
3889                 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3890                 break;
3891         case MC_SEQ_CAS_TIMING >> 2:
3892                 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3893             break;
3894         case MC_SEQ_MISC_TIMING >> 2:
3895                 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3896                 break;
3897         case MC_SEQ_MISC_TIMING2 >> 2:
3898                 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3899                 break;
3900         case MC_SEQ_PMG_DVS_CMD >> 2:
3901                 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3902                 break;
3903         case MC_SEQ_PMG_DVS_CTL >> 2:
3904                 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3905                 break;
3906         case MC_SEQ_RD_CTL_D0 >> 2:
3907                 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3908                 break;
3909         case MC_SEQ_RD_CTL_D1 >> 2:
3910                 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3911                 break;
3912         case MC_SEQ_WR_CTL_D0 >> 2:
3913                 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3914                 break;
3915         case MC_SEQ_WR_CTL_D1 >> 2:
3916                 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3917                 break;
3918         case MC_PMG_CMD_EMRS >> 2:
3919                 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3920                 break;
3921         case MC_PMG_CMD_MRS >> 2:
3922                 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3923                 break;
3924         case MC_PMG_CMD_MRS1 >> 2:
3925                 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3926                 break;
3927         case MC_SEQ_PMG_TIMING >> 2:
3928                 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3929                 break;
3930         case MC_PMG_CMD_MRS2 >> 2:
3931                 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3932                 break;
3933         case MC_SEQ_WR_CTL_2 >> 2:
3934                 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3935                 break;
3936         default:
3937                 result = false;
3938                 break;
3939         }
3940
3941         return result;
3942 }
3943
3944 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3945 {
3946         u8 i, j;
3947
3948         for (i = 0; i < table->last; i++) {
3949                 for (j = 1; j < table->num_entries; j++) {
3950                         if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3951                             table->mc_reg_table_entry[j].mc_data[i]) {
3952                                 table->valid_flag |= 1 << i;
3953                                 break;
3954                         }
3955                 }
3956         }
3957 }
3958
3959 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
3960 {
3961         u32 i;
3962         u16 address;
3963
3964         for (i = 0; i < table->last; i++) {
3965                 table->mc_reg_address[i].s0 =
3966                         ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
3967                         address : table->mc_reg_address[i].s1;
3968         }
3969 }
3970
3971 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
3972                                       struct ci_mc_reg_table *ci_table)
3973 {
3974         u8 i, j;
3975
3976         if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3977                 return -EINVAL;
3978         if (table->num_entries > MAX_AC_TIMING_ENTRIES)
3979                 return -EINVAL;
3980
3981         for (i = 0; i < table->last; i++)
3982                 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3983
3984         ci_table->last = table->last;
3985
3986         for (i = 0; i < table->num_entries; i++) {
3987                 ci_table->mc_reg_table_entry[i].mclk_max =
3988                         table->mc_reg_table_entry[i].mclk_max;
3989                 for (j = 0; j < table->last; j++)
3990                         ci_table->mc_reg_table_entry[i].mc_data[j] =
3991                                 table->mc_reg_table_entry[i].mc_data[j];
3992         }
3993         ci_table->num_entries = table->num_entries;
3994
3995         return 0;
3996 }
3997
/* Build the driver's MC register table: seed the _LP shadow registers from
 * the live MC registers, read the VBIOS table for the installed memory
 * module, copy it into driver format, resolve shadow addresses, append the
 * derived special-register columns and mark the varying columns.
 * Returns 0 on success or a negative error code.
 */
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct atom_mc_reg_table *table;
        struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
        u8 module_index = rv770_get_memory_module_index(rdev);
        int ret;

        table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        /* Initialize each _LP shadow register with the current contents of
         * its live counterpart. */
        WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
        WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
        WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
        WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
        WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
        WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
        WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
        WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
        WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
        WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
        WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
        WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
        WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
        WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
        WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
        WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
        WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
        WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
        WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
        WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));

        ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
        if (ret)
                goto init_mc_done;

        ret = ci_copy_vbios_mc_reg_table(table, ci_table);
        if (ret)
                goto init_mc_done;

        ci_set_s0_mc_reg_index(ci_table);

        ret = ci_set_mc_special_registers(rdev, ci_table);
        if (ret)
                goto init_mc_done;

        ci_set_valid_flag(ci_table);

init_mc_done:
        /* the ATOM table is only needed transiently */
        kfree(table);

        return ret;
}
4052
4053 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4054                                         SMU7_Discrete_MCRegisters *mc_reg_table)
4055 {
4056         struct ci_power_info *pi = ci_get_pi(rdev);
4057         u32 i, j;
4058
4059         for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4060                 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4061                         if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4062                                 return -EINVAL;
4063                         mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4064                         mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4065                         i++;
4066                 }
4067         }
4068
4069         mc_reg_table->last = (u8)i;
4070
4071         return 0;
4072 }
4073
4074 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4075                                     SMU7_Discrete_MCRegisterSet *data,
4076                                     u32 num_entries, u32 valid_flag)
4077 {
4078         u32 i, j;
4079
4080         for (i = 0, j = 0; j < num_entries; j++) {
4081                 if (valid_flag & (1 << j)) {
4082                         data->value[i] = cpu_to_be32(entry->mc_data[j]);
4083                         i++;
4084                 }
4085         }
4086 }
4087
4088 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4089                                                  const u32 memory_clock,
4090                                                  SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4091 {
4092         struct ci_power_info *pi = ci_get_pi(rdev);
4093         u32 i = 0;
4094
4095         for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4096                 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4097                         break;
4098         }
4099
4100         if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4101                 --i;
4102
4103         ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4104                                 mc_reg_table_data, pi->mc_reg_table.last,
4105                                 pi->mc_reg_table.valid_flag);
4106 }
4107
4108 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4109                                            SMU7_Discrete_MCRegisters *mc_reg_table)
4110 {
4111         struct ci_power_info *pi = ci_get_pi(rdev);
4112         u32 i;
4113
4114         for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4115                 ci_convert_mc_reg_table_entry_to_smc(rdev,
4116                                                      pi->dpm_table.mclk_table.dpm_levels[i].value,
4117                                                      &mc_reg_table->data[i]);
4118 }
4119
/* Build the complete SMC MC register table (addresses and per-level data)
 * and upload it to SMC SRAM.  Returns 0 on success or a negative error
 * code.
 */
static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        int ret;

        memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

        ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
        if (ret)
                return ret;
        ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

        /* push the whole structure into SMC SRAM */
        return ci_copy_bytes_to_smc(rdev,
                                    pi->mc_reg_table_start,
                                    (u8 *)&pi->smc_mc_reg_table,
                                    sizeof(SMU7_Discrete_MCRegisters),
                                    pi->sram_end);
}
4138
/* Re-upload only the per-level MC register data to SMC SRAM after an MCLK
 * overdrive change; the address portion of the table is unchanged so only
 * the data[] region is written.  No-op (returns 0) when MCLK did not
 * change.
 */
static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
{
        struct ci_power_info *pi = ci_get_pi(rdev);

        if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
                return 0;

        memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

        ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

        /* write only data[0..count-1], starting at its offset in SRAM */
        return ci_copy_bytes_to_smc(rdev,
                                    pi->mc_reg_table_start +
                                    offsetof(SMU7_Discrete_MCRegisters, data[0]),
                                    (u8 *)&pi->smc_mc_reg_table.data[0],
                                    sizeof(SMU7_Discrete_MCRegisterSet) *
                                    pi->dpm_table.mclk_table.count,
                                    pi->sram_end);
}
4158
/* Turn on dynamic voltage control by setting VOLT_PWRMGT_EN in
 * GENERAL_PWRMGT. */
static void ci_enable_voltage_control(struct radeon_device *rdev)
{
        u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

        tmp |= VOLT_PWRMGT_EN;
        WREG32_SMC(GENERAL_PWRMGT, tmp);
}
4166
4167 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4168                                                       struct radeon_ps *radeon_state)
4169 {
4170         struct ci_ps *state = ci_get_ps(radeon_state);
4171         int i;
4172         u16 pcie_speed, max_speed = 0;
4173
4174         for (i = 0; i < state->performance_level_count; i++) {
4175                 pcie_speed = state->performance_levels[i].pcie_gen;
4176                 if (max_speed < pcie_speed)
4177                         max_speed = pcie_speed;
4178         }
4179
4180         return max_speed;
4181 }
4182
/* Read the current PCIE data rate field from PCIE_LC_SPEED_CNTL. */
static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
{
        u32 speed_cntl = 0;

        speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
        speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;

        return (u16)speed_cntl;
}
4192
4193 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4194 {
4195         u32 link_width = 0;
4196
4197         link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4198         link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4199
4200         switch (link_width) {
4201         case RADEON_PCIE_LC_LINK_WIDTH_X1:
4202                 return 1;
4203         case RADEON_PCIE_LC_LINK_WIDTH_X2:
4204                 return 2;
4205         case RADEON_PCIE_LC_LINK_WIDTH_X4:
4206                 return 4;
4207         case RADEON_PCIE_LC_LINK_WIDTH_X8:
4208                 return 8;
4209         case RADEON_PCIE_LC_LINK_WIDTH_X12:
4210                 /* not actually supported */
4211                 return 12;
4212         case RADEON_PCIE_LC_LINK_WIDTH_X0:
4213         case RADEON_PCIE_LC_LINK_WIDTH_X16:
4214         default:
4215                 return 16;
4216         }
4217 }
4218
/* Before switching power states, ask the platform (via ACPI) to raise the
 * PCIE link speed if the new state wants a faster link than the current
 * one.  On failure, record a reduced forced gen to retry with; if only a
 * slowdown is needed, defer it until after the state change (see
 * ci_notify_link_speed_change_after_state_change).
 */
static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
                                                             struct radeon_ps *radeon_new_state,
                                                             struct radeon_ps *radeon_current_state)
{
        struct ci_power_info *pi = ci_get_pi(rdev);
        enum radeon_pcie_gen target_link_speed =
                ci_get_maximum_link_speed(rdev, radeon_new_state);
        enum radeon_pcie_gen current_link_speed;

        if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
                current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
        else
                current_link_speed = pi->force_pcie_gen;

        pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
        pi->pspp_notify_required = false;
        if (target_link_speed > current_link_speed) {
                switch (target_link_speed) {
#ifdef CONFIG_ACPI
                case RADEON_PCIE_GEN3:
                        if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
                                break;
                        /* gen3 request failed: force gen2 and, unless we
                         * are already at gen2, fall through to request it */
                        pi->force_pcie_gen = RADEON_PCIE_GEN2;
                        if (current_link_speed == RADEON_PCIE_GEN2)
                                break;
                        /* fall through */
                case RADEON_PCIE_GEN2:
                        if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
                                break;
                        /* fall through */
#endif
                default:
                        /* no ACPI (or request failed): keep the current speed */
                        pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
                        break;
                }
        } else {
                if (target_link_speed < current_link_speed)
                        pi->pspp_notify_required = true;
        }
}
4257
4258 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4259                                                            struct radeon_ps *radeon_new_state,
4260                                                            struct radeon_ps *radeon_current_state)
4261 {
4262         struct ci_power_info *pi = ci_get_pi(rdev);
4263         enum radeon_pcie_gen target_link_speed =
4264                 ci_get_maximum_link_speed(rdev, radeon_new_state);
4265         u8 request;
4266
4267         if (pi->pspp_notify_required) {
4268                 if (target_link_speed == RADEON_PCIE_GEN3)
4269                         request = PCIE_PERF_REQ_PECI_GEN3;
4270                 else if (target_link_speed == RADEON_PCIE_GEN2)
4271                         request = PCIE_PERF_REQ_PECI_GEN2;
4272                 else
4273                         request = PCIE_PERF_REQ_PECI_GEN1;
4274
4275                 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4276                     (ci_get_current_pcie_speed(rdev) > 0))
4277                         return;
4278
4279 #ifdef CONFIG_ACPI
4280                 radeon_acpi_pcie_performance_request(rdev, request, false);
4281 #endif
4282         }
4283 }
4284
4285 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4286 {
4287         struct ci_power_info *pi = ci_get_pi(rdev);
4288         struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4289                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4290         struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4291                 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4292         struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4293                 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4294
4295         if (allowed_sclk_vddc_table == NULL)
4296                 return -EINVAL;
4297         if (allowed_sclk_vddc_table->count < 1)
4298                 return -EINVAL;
4299         if (allowed_mclk_vddc_table == NULL)
4300                 return -EINVAL;
4301         if (allowed_mclk_vddc_table->count < 1)
4302                 return -EINVAL;
4303         if (allowed_mclk_vddci_table == NULL)
4304                 return -EINVAL;
4305         if (allowed_mclk_vddci_table->count < 1)
4306                 return -EINVAL;
4307
4308         pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4309         pi->max_vddc_in_pp_table =
4310                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4311
4312         pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4313         pi->max_vddci_in_pp_table =
4314                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4315
4316         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4317                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4318         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4319                 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4320         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4321                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4322         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4323                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4324
4325         return 0;
4326 }
4327
4328 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4329 {
4330         struct ci_power_info *pi = ci_get_pi(rdev);
4331         struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4332         u32 leakage_index;
4333
4334         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4335                 if (leakage_table->leakage_id[leakage_index] == *vddc) {
4336                         *vddc = leakage_table->actual_voltage[leakage_index];
4337                         break;
4338                 }
4339         }
4340 }
4341
4342 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4343 {
4344         struct ci_power_info *pi = ci_get_pi(rdev);
4345         struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4346         u32 leakage_index;
4347
4348         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4349                 if (leakage_table->leakage_id[leakage_index] == *vddci) {
4350                         *vddci = leakage_table->actual_voltage[leakage_index];
4351                         break;
4352                 }
4353         }
4354 }
4355
4356 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4357                                                                       struct radeon_clock_voltage_dependency_table *table)
4358 {
4359         u32 i;
4360
4361         if (table) {
4362                 for (i = 0; i < table->count; i++)
4363                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4364         }
4365 }
4366
4367 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4368                                                                        struct radeon_clock_voltage_dependency_table *table)
4369 {
4370         u32 i;
4371
4372         if (table) {
4373                 for (i = 0; i < table->count; i++)
4374                         ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4375         }
4376 }
4377
4378 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4379                                                                           struct radeon_vce_clock_voltage_dependency_table *table)
4380 {
4381         u32 i;
4382
4383         if (table) {
4384                 for (i = 0; i < table->count; i++)
4385                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4386         }
4387 }
4388
4389 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4390                                                                           struct radeon_uvd_clock_voltage_dependency_table *table)
4391 {
4392         u32 i;
4393
4394         if (table) {
4395                 for (i = 0; i < table->count; i++)
4396                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4397         }
4398 }
4399
4400 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4401                                                                    struct radeon_phase_shedding_limits_table *table)
4402 {
4403         u32 i;
4404
4405         if (table) {
4406                 for (i = 0; i < table->count; i++)
4407                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4408         }
4409 }
4410
4411 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4412                                                             struct radeon_clock_and_voltage_limits *table)
4413 {
4414         if (table) {
4415                 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4416                 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4417         }
4418 }
4419
4420 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4421                                                          struct radeon_cac_leakage_table *table)
4422 {
4423         u32 i;
4424
4425         if (table) {
4426                 for (i = 0; i < table->count; i++)
4427                         ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4428         }
4429 }
4430
/*
 * Run every voltage table that came from the pptable through the leakage
 * patchers so any leakage record ids are replaced with real voltages
 * before the tables are used to build the SMC state.
 */
static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
{

	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
						     &rdev->pm.dpm.dyn_state.cac_leakage_table);

}
4460
4461 static void ci_get_memory_type(struct radeon_device *rdev)
4462 {
4463         struct ci_power_info *pi = ci_get_pi(rdev);
4464         u32 tmp;
4465
4466         tmp = RREG32(MC_SEQ_MISC0);
4467
4468         if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4469             MC_SEQ_MISC0_GDDR5_VALUE)
4470                 pi->mem_gddr5 = true;
4471         else
4472                 pi->mem_gddr5 = false;
4473
4474 }
4475
/*
 * Copy @rps (and its ci_ps private data) into the driver's "current"
 * state.  The ps_priv pointer must be repointed after the struct copy so
 * the cached radeon_ps refers to the cached ci_ps, not the original.
 */
void ci_update_current_ps(struct radeon_device *rdev,
			  struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}
4486
/*
 * Copy @rps (and its ci_ps private data) into the driver's "requested"
 * state, repointing ps_priv at the cached copy (see ci_update_current_ps).
 */
void ci_update_requested_ps(struct radeon_device *rdev,
			    struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}
4497
/*
 * dpm callback run before a state switch: cache the requested state and
 * adjust it against the current display/thermal constraints.  Always
 * returns 0.
 */
int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	ci_update_requested_ps(rdev, new_ps);

	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);

	return 0;
}
4510
/*
 * dpm callback run after a state switch: the previously requested state
 * becomes the current state.
 */
void ci_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	ci_update_current_ps(rdev, new_ps);
}
4518
4519
/*
 * One-time asic-level DPM setup: snapshot the clock registers, detect
 * the vram type, and enable ACPI power management and the sclk
 * throttle-threshold setup.
 */
void ci_dpm_setup_asic(struct radeon_device *rdev)
{
	ci_read_clock_registers(rdev);
	ci_get_memory_type(rdev);
	ci_enable_acpi_power_management(rdev);
	ci_init_sclk_t(rdev);
}
4527
/*
 * Bring up the CI DPM engine: build the voltage and MC register tables,
 * upload and start the SMC firmware, enable the individual DPM features
 * (ULV, deep sleep, DIDT, CAC, power containment), and hook up the
 * thermal interrupt.  The step order follows the SMC init sequence and
 * must not be changed.
 *
 * Returns 0 on success or a negative error code.  On error the hardware
 * may be left partially enabled; the caller is expected to tear down.
 */
int ci_dpm_enable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	/* disable clockgating on all blocks while DPM is reprogrammed */
	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_MC |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_UVD |
			     RADEON_CG_BLOCK_HDP), false);

	/* the SMC must not already be running when it is (re)initialized */
	if (ci_is_smc_running(rdev))
		return -EINVAL;
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(rdev);
		ret = ci_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	/* dynamic AC timing is optional; fall back silently if the MC
	 * register table cannot be built
	 */
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(rdev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, true);
	ci_program_sstp(rdev);
	ci_enable_display_gap(rdev);
	ci_program_vc(rdev);
	/* load the SMC firmware and its tables, then start it */
	ret = ci_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(rdev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	ci_dpm_start_smc(rdev);
	ci_enable_vr_hot_gpio_interrupt(rdev);
	ret = ci_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	/* enable the individual DPM features now that the SMC is running */
	ci_enable_sclk_control(rdev, true);
	ret = ci_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}
	/* hook the internal thermal sensor to the dpm thermal interrupt */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
#if 0
		PPSMC_Result result;
#endif
		ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("ci_set_thermal_temperature_range failed\n");
			return ret;
		}
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
#if 0
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);

		if (result != PPSMC_Result_OK)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
#endif
	}

	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ci_dpm_powergate_uvd(rdev, true);

	/* re-enable clockgating now that DPM is up */
	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_MC |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_UVD |
			     RADEON_CG_BLOCK_HDP), true);

	ci_update_current_ps(rdev, boot_ps);

	return 0;
}
4673
/*
 * Tear down the CI DPM engine: disable each DPM feature, stop the SMC,
 * reset to the hardware defaults, and fall back to the ARB F0 timing
 * set.  The current state reverts to the boot state.
 */
void ci_dpm_disable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	/* disable clockgating while DPM is torn down (BIF intentionally
	 * absent here, unlike the enable path)
	 */
	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_MC |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_UVD |
			     RADEON_CG_BLOCK_HDP), false);

	ci_dpm_powergate_uvd(rdev, false);

	/* nothing to tear down if the SMC never started */
	if (!ci_is_smc_running(rdev))
		return;

	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, false);
	ci_enable_power_containment(rdev, false);
	ci_enable_smc_cac(rdev, false);
	ci_enable_didt(rdev, false);
	ci_enable_spread_spectrum(rdev, false);
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(rdev);
	/* NOTE(review): 'true' here looks suspicious on a disable path --
	 * the amdgpu port of this function passes false; confirm intent
	 */
	ci_enable_ds_master_switch(rdev, true);
	ci_enable_ulv(rdev, false);
	ci_clear_vc(rdev);
	ci_reset_to_default(rdev);
	ci_dpm_stop_smc(rdev);
	ci_force_switch_to_arb_f0(rdev);

	ci_update_current_ps(rdev, boot_ps);
}
4707
/*
 * Switch from the current power state to the requested one: freeze the
 * sclk/mclk DPM, upload the new levels and enable mask, reprogram the
 * memory timing, then unfreeze.  PCIe link speed changes are requested
 * before the switch and notified after it.  The freeze/upload/unfreeze
 * ordering is mandatory.
 *
 * Returns 0 on success or a negative error code.
 */
int ci_dpm_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	/* disable clockgating for the duration of the switch */
	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_MC |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_UVD |
			     RADEON_CG_BLOCK_HDP), false);

	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}
#if 0
	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}
#endif
	ret = ci_update_sclk_t(rdev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(rdev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

	/* re-enable clockgating after the switch completes */
	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			     RADEON_CG_BLOCK_MC |
			     RADEON_CG_BLOCK_SDMA |
			     RADEON_CG_BLOCK_BIF |
			     RADEON_CG_BLOCK_UVD |
			     RADEON_CG_BLOCK_HDP), true);

	return 0;
}
4786
/* dpm callback: thin wrapper around the CI power containment level setter. */
int ci_dpm_power_control_set_level(struct radeon_device *rdev)
{
	return ci_power_control_set_level(rdev);
}
4791
/* dpm callback: force the asic back to its boot power state. */
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
	ci_set_boot_state(rdev);
}
4796
/* dpm callback: reprogram the display gap after a display config change. */
void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	ci_program_display_gap(rdev);
}
4801
/* overlay of all ATOM PowerPlay info table revisions found in the vbios */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};
4810
/* overlay of the per-asic-family PPLIB clock info layouts; CI uses .ci */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};
4819
/* overlay of the v1/v2 PPLIB power state layouts; CI parsing uses .v2 */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
4824
4825 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4826                                           struct radeon_ps *rps,
4827                                           struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4828                                           u8 table_rev)
4829 {
4830         rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4831         rps->class = le16_to_cpu(non_clock_info->usClassification);
4832         rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4833
4834         if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
4835                 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4836                 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4837         } else {
4838                 rps->vclk = 0;
4839                 rps->dclk = 0;
4840         }
4841
4842         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4843                 rdev->pm.dpm.boot_ps = rps;
4844         if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4845                 rdev->pm.dpm.uvd_ps = rps;
4846 }
4847
/*
 * Fill performance level @index of @rps from a PPLIB CI clock info
 * record: sclk/mclk (split low u16 + high u8 in the vbios), pcie
 * gen/lane, plus side effects keyed off the state classification
 * (ACPI pcie gen, ULV level, boot-state patching, and the battery/
 * performance pcie min/max trackers).
 */
static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	/* clocks are stored as a low u16 plus a high u8 in the vbios */
	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 pi->sys_pcie_mask,
						 pi->vbios_boot_state.pcie_gen_bootup_value,
						 clock_info->ci.ucPCIEGen);
	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
						   pi->vbios_boot_state.pcie_lane_bootup_value,
						   le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	/* widen the battery/performance pcie gen+lane ranges to cover
	 * this level
	 */
	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}
4916
4917 static int ci_parse_power_table(struct radeon_device *rdev)
4918 {
4919         struct radeon_mode_info *mode_info = &rdev->mode_info;
4920         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
4921         union pplib_power_state *power_state;
4922         int i, j, k, non_clock_array_index, clock_array_index;
4923         union pplib_clock_info *clock_info;
4924         struct _StateArray *state_array;
4925         struct _ClockInfoArray *clock_info_array;
4926         struct _NonClockInfoArray *non_clock_info_array;
4927         union power_info *power_info;
4928         int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
4929         u16 data_offset;
4930         u8 frev, crev;
4931         u8 *power_state_offset;
4932         struct ci_ps *ps;
4933
4934         if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
4935                                    &frev, &crev, &data_offset))
4936                 return -EINVAL;
4937         power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
4938
4939         state_array = (struct _StateArray *)
4940                 (mode_info->atom_context->bios + data_offset +
4941                  le16_to_cpu(power_info->pplib.usStateArrayOffset));
4942         clock_info_array = (struct _ClockInfoArray *)
4943                 (mode_info->atom_context->bios + data_offset +
4944                  le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
4945         non_clock_info_array = (struct _NonClockInfoArray *)
4946                 (mode_info->atom_context->bios + data_offset +
4947                  le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
4948
4949         rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
4950                                   state_array->ucNumEntries, GFP_KERNEL);
4951         if (!rdev->pm.dpm.ps)
4952                 return -ENOMEM;
4953         power_state_offset = (u8 *)state_array->states;
4954         rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
4955         rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
4956         rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
4957         for (i = 0; i < state_array->ucNumEntries; i++) {
4958                 u8 *idx;
4959                 power_state = (union pplib_power_state *)power_state_offset;
4960                 non_clock_array_index = power_state->v2.nonClockInfoIndex;
4961                 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4962                         &non_clock_info_array->nonClockInfo[non_clock_array_index];
4963                 if (!rdev->pm.power_state[i].clock_info)
4964                         return -EINVAL;
4965                 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
4966                 if (ps == NULL) {
4967                         kfree(rdev->pm.dpm.ps);
4968                         return -ENOMEM;
4969                 }
4970                 rdev->pm.dpm.ps[i].ps_priv = ps;
4971                 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4972                                               non_clock_info,
4973                                               non_clock_info_array->ucEntrySize);
4974                 k = 0;
4975                 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
4976                 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
4977                         clock_array_index = idx[j];
4978                         if (clock_array_index >= clock_info_array->ucNumEntries)
4979                                 continue;
4980                         if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
4981                                 break;
4982                         clock_info = (union pplib_clock_info *)
4983                                 ((u8 *)&clock_info_array->clockInfo[0] +
4984                                  (clock_array_index * clock_info_array->ucEntrySize));
4985                         ci_parse_pplib_clock_info(rdev,
4986                                                   &rdev->pm.dpm.ps[i], k,
4987                                                   clock_info);
4988                         k++;
4989                 }
4990                 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
4991         }
4992         rdev->pm.dpm.num_ps = state_array->ucNumEntries;
4993         return 0;
4994 }
4995
4996 int ci_get_vbios_boot_values(struct radeon_device *rdev,
4997                              struct ci_vbios_boot_state *boot_state)
4998 {
4999         struct radeon_mode_info *mode_info = &rdev->mode_info;
5000         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5001         ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5002         u8 frev, crev;
5003         u16 data_offset;
5004
5005         if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5006                                    &frev, &crev, &data_offset)) {
5007                 firmware_info =
5008                         (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5009                                                     data_offset);
5010                 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5011                 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5012                 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5013                 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5014                 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5015                 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5016                 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5017
5018                 return 0;
5019         }
5020         return -EINVAL;
5021 }
5022
/**
 * ci_dpm_fini - tear down the dpm state built by ci_dpm_init()
 * @rdev: radeon device pointer
 *
 * Frees the per-power-state private data first, then the power state
 * array itself, the driver private ci_power_info, the default
 * dispclk/vddc dependency table, and finally the extended power tables.
 * Also used on ci_dpm_init() error paths; kfree(NULL) is a no-op and
 * num_ps is only set once the power table has been parsed, so a
 * partially initialized state is freed safely (assumes rdev->pm.dpm
 * starts zeroed — TODO confirm against the allocation of rdev).
 */
void ci_dpm_fini(struct radeon_device *rdev)
{
        int i;

        for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
                kfree(rdev->pm.dpm.ps[i].ps_priv);
        }
        kfree(rdev->pm.dpm.ps);
        kfree(rdev->pm.dpm.priv);
        kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
        r600_free_extended_power_table(rdev);
}
5035
5036 int ci_dpm_init(struct radeon_device *rdev)
5037 {
5038         int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5039         u16 data_offset, size;
5040         u8 frev, crev;
5041         struct ci_power_info *pi;
5042         int ret;
5043         u32 mask;
5044
5045         pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5046         if (pi == NULL)
5047                 return -ENOMEM;
5048         rdev->pm.dpm.priv = pi;
5049
5050         ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
5051         if (ret)
5052                 pi->sys_pcie_mask = 0;
5053         else
5054                 pi->sys_pcie_mask = mask;
5055         pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5056
5057         pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5058         pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5059         pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5060         pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5061
5062         pi->pcie_lane_performance.max = 0;
5063         pi->pcie_lane_performance.min = 16;
5064         pi->pcie_lane_powersaving.max = 0;
5065         pi->pcie_lane_powersaving.min = 16;
5066
5067         ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5068         if (ret) {
5069                 ci_dpm_fini(rdev);
5070                 return ret;
5071         }
5072         ret = ci_parse_power_table(rdev);
5073         if (ret) {
5074                 ci_dpm_fini(rdev);
5075                 return ret;
5076         }
5077         ret = r600_parse_extended_power_table(rdev);
5078         if (ret) {
5079                 ci_dpm_fini(rdev);
5080                 return ret;
5081         }
5082
5083         pi->dll_default_on = false;
5084         pi->sram_end = SMC_RAM_END;
5085
5086         pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5087         pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5088         pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5089         pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5090         pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5091         pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5092         pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5093         pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5094
5095         pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5096
5097         pi->sclk_dpm_key_disabled = 0;
5098         pi->mclk_dpm_key_disabled = 0;
5099         pi->pcie_dpm_key_disabled = 0;
5100
5101         pi->caps_sclk_ds = true;
5102
5103         pi->mclk_strobe_mode_threshold = 40000;
5104         pi->mclk_stutter_mode_threshold = 40000;
5105         pi->mclk_edc_enable_threshold = 40000;
5106         pi->mclk_edc_wr_enable_threshold = 40000;
5107
5108         ci_initialize_powertune_defaults(rdev);
5109
5110         pi->caps_fps = false;
5111
5112         pi->caps_sclk_throttle_low_notification = false;
5113
5114         pi->caps_uvd_dpm = true;
5115
5116         ci_get_leakage_voltages(rdev);
5117         ci_patch_dependency_tables_with_leakage(rdev);
5118         ci_set_private_data_variables_based_on_pptable(rdev);
5119
5120         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5121                 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5122         if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5123                 ci_dpm_fini(rdev);
5124                 return -ENOMEM;
5125         }
5126         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5127         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5128         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5129         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5130         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5131         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5132         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5133         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5134         rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5135
5136         rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5137         rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5138         rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5139
5140         rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5141         rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5142         rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5143         rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5144
5145         pi->thermal_temp_setting.temperature_low = 99500;
5146         pi->thermal_temp_setting.temperature_high = 100000;
5147         pi->thermal_temp_setting.temperature_shutdown = 104000;
5148
5149         pi->uvd_enabled = false;
5150
5151         pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5152         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5153         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5154         if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5155                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5156         else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5157                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5158
5159         if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5160                 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5161                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5162                 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5163                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5164                 else
5165                         rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5166         }
5167
5168         if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5169                 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5170                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5171                 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5172                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5173                 else
5174                         rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5175         }
5176
5177         pi->vddc_phase_shed_control = true;
5178
5179 #if defined(CONFIG_ACPI)
5180         pi->pcie_performance_request =
5181                 radeon_acpi_is_pcie_performance_request_supported(rdev);
5182 #else
5183         pi->pcie_performance_request = false;
5184 #endif
5185
5186         if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5187                                    &frev, &crev, &data_offset)) {
5188                 pi->caps_sclk_ss_support = true;
5189                 pi->caps_mclk_ss_support = true;
5190                 pi->dynamic_ss = true;
5191         } else {
5192                 pi->caps_sclk_ss_support = false;
5193                 pi->caps_mclk_ss_support = false;
5194                 pi->dynamic_ss = true;
5195         }
5196
5197         if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5198                 pi->thermal_protection = true;
5199         else
5200                 pi->thermal_protection = false;
5201
5202         pi->caps_dynamic_ac_timing = true;
5203
5204         pi->uvd_power_gated = false;
5205
5206         /* make sure dc limits are valid */
5207         if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5208             (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5209                 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5210                         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5211
5212         return 0;
5213 }
5214
5215 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5216                                                     struct seq_file *m)
5217 {
5218         u32 sclk = ci_get_average_sclk_freq(rdev);
5219         u32 mclk = ci_get_average_mclk_freq(rdev);
5220
5221         seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
5222                    sclk, mclk);
5223 }
5224
5225 void ci_dpm_print_power_state(struct radeon_device *rdev,
5226                               struct radeon_ps *rps)
5227 {
5228         struct ci_ps *ps = ci_get_ps(rps);
5229         struct ci_pl *pl;
5230         int i;
5231
5232         r600_dpm_print_class_info(rps->class, rps->class2);
5233         r600_dpm_print_cap_info(rps->caps);
5234         printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5235         for (i = 0; i < ps->performance_level_count; i++) {
5236                 pl = &ps->performance_levels[i];
5237                 printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5238                        i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5239         }
5240         r600_dpm_print_ps_status(rdev, rps);
5241 }
5242
5243 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5244 {
5245         struct ci_power_info *pi = ci_get_pi(rdev);
5246         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5247
5248         if (low)
5249                 return requested_state->performance_levels[0].sclk;
5250         else
5251                 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5252 }
5253
5254 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5255 {
5256         struct ci_power_info *pi = ci_get_pi(rdev);
5257         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5258
5259         if (low)
5260                 return requested_state->performance_levels[0].mclk;
5261         else
5262                 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5263 }