/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <core/memory.h>
#include <subdev/timer.h>
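
/*
 * Load a blob into the falcon's code memory (IMEM) through the PIO port,
 * writing a fresh tag for every 256-byte page and, when requested, setting
 * the bit that presumably marks the region as secure code.
 */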
static void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u16 tag, u8 port, bool secure)
{
	u8 rem = size % 4;
	u32 reg;
	int i;

	size -= rem;

	reg = start | BIT(24) | (secure ? BIT(28) : 0);
	nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
	for (i = 0; i < size / 4; i++) {
		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188, tag++);
		nvkm_falcon_wr32(falcon, 0x184, ((u32 *)data)[i]);
	}

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188, tag++);
		nvkm_falcon_wr32(falcon, 0x184, extra & (BIT(rem * 8) - 1));
		++i;
	}

	/* code must be padded to 0x40 words */
	for (; i & 0x3f; i++)
		nvkm_falcon_wr32(falcon, 0x184, 0);
}
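
/*
 * Copy a blob into the falcon's data memory (DMEM) through the PIO port;
 * bit 24 of the control word appears to enable address auto-increment,
 * so only the starting offset needs to be programmed.
 */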
static void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u8 port)
{
	u8 rem = size % 4;
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 24));
	for (i = 0; i < size / 4; i++)
		nvkm_falcon_wr32(falcon, 0x1c4, ((u32 *)data)[i]);

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		nvkm_falcon_wr32(falcon, 0x1c4, extra & (BIT(rem * 8) - 1));
	}
}
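
/*
 * Read a range of the falcon's DMEM back into a buffer; bit 25 looks like
 * the read-side counterpart of the auto-increment flag used when loading.
 */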
static void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
			 u8 port, void *data)
{
	u8 rem = size % 4;
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 25));
	for (i = 0; i < size / 4; i++)
		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4);

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get read
	 */
	if (rem) {
		u32 extra = nvkm_falcon_rd32(falcon, 0x1c4);

		for (i = size; i < size + rem; i++) {
			((u8 *)data)[i] = (u8)(extra & 0xff);
			extra >>= 8;
		}
	}
}
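
/*
 * Bind the instance block describing the address space the falcon should
 * use for DMA, or disable the binding altogether when ctx is NULL.
 */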
static void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
{
	u32 inst_loc;

	/* disable instance block binding */
	if (ctx == NULL) {
		nvkm_falcon_wr32(falcon, 0x10c, 0x0);
		return;
	}

	nvkm_falcon_wr32(falcon, 0x10c, 0x1);

	/* setup apertures - virtual */
	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_UCODE, 0x4);
	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_VIRT, 0x0);
	/* setup apertures - physical */
	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);

	/* Set context */
	switch (nvkm_memory_target(ctx->memory)) {
	case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
	case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	/* Enable context */
	nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
	nvkm_falcon_wr32(falcon, 0x480,
			 ((ctx->addr >> 12) & 0xfffffff) |
			 (inst_loc << 28) | (1 << 30));
}
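
/* Program the IMEM address execution will begin from once started. */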
static void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
	nvkm_falcon_wr32(falcon, 0x104, start_addr);
}
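
/*
 * Kick off execution. Bit 6 of register 0x100 seemingly selects between
 * the primary control register and its alias at 0x130.
 */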
static void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
	u32 reg = nvkm_falcon_rd32(falcon, 0x100);

	if (reg & BIT(6))
		nvkm_falcon_wr32(falcon, 0x130, 0x2);
	else
		nvkm_falcon_wr32(falcon, 0x100, 0x2);
}
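
/* Wait up to ms milliseconds for the halt bit (bit 4 of 0x100) to assert. */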
static int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
	if (ret < 0)
		return ret;

	return 0;
}
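
/*
 * Acknowledge the interrupts in mask, then poll the status register until
 * the corresponding bits read back as cleared.
 */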
static int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	/* clear interrupt(s) */
	nvkm_falcon_mask(falcon, 0x004, mask, mask);
	/* wait until interrupts are cleared */
	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}
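
/* Poll the unit status word at 0x04c until every unit reports idle. */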
static int
falcon_v1_wait_idle(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}
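
/*
 * Bring the falcon up: wait for its memory scrubbing to complete and for
 * the engine to go idle, then unmask all interrupt sources.
 */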
static int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
	if (ret < 0) {
		nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
		return ret;
	}

	ret = falcon_v1_wait_idle(falcon);
	if (ret)
		return ret;

	/* enable IRQs */
	nvkm_falcon_wr32(falcon, 0x010, 0xff);

	return 0;
}
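
/* Mask all interrupt sources and let any in-flight code run to completion. */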
static void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
	/* disable IRQs and wait for any previous code to complete */
	nvkm_falcon_wr32(falcon, 0x014, 0xff);
	falcon_v1_wait_idle(falcon);
}
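
/* PIO-based operations shared by engines built around a v1 falcon. */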
static const struct nvkm_falcon_func
nvkm_falcon_v1 = {
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.read_dmem = nvkm_falcon_v1_read_dmem,
	.bind_context = nvkm_falcon_v1_bind_context,
	.start = nvkm_falcon_v1_start,
	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
	.enable = nvkm_falcon_v1_enable,
	.disable = nvkm_falcon_v1_disable,
	.set_start_addr = nvkm_falcon_v1_set_start_addr,
};
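
/* Allocate a falcon instance and hook it up to the v1 operations above. */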
int
nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
		   struct nvkm_falcon **pfalcon)
{
	struct nvkm_falcon *falcon;
	if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
	return 0;
}