/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r352.h"

#include <core/gpuobj.h>
#include <core/firmware.h>
#include <engine/falcon.h>

/**
 * struct hsf_fw_header - HS firmware descriptor
 * @sig_dbg_offset: offset of the debug signature
 * @sig_dbg_size: size of the debug signature
 * @sig_prod_offset: offset of the production signature
 * @sig_prod_size: size of the production signature
 * @patch_loc: offset within the HS data where the signature is patched in
 * @patch_sig: offset to add to sig_*_offset to find the signature to copy
 * @hdr_offset: offset of the load header (see struct hs_load_header)
 * @hdr_size: size of above header
 *
 * This structure is embedded in the HS firmware image at
 * hs_bin_hdr.header_offset.
 */
struct hsf_fw_header {
	u32 sig_dbg_offset;
	u32 sig_dbg_size;
	u32 sig_prod_offset;
	u32 sig_prod_size;
	u32 patch_loc;
	u32 patch_sig;
	u32 hdr_offset;
	u32 hdr_size;
};

/**
 * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
 * @signature: 16B signature for secure code. 0s if no secure code
 * @ctx_dma: DMA context to be used by BL while loading code/data
 * @code_dma_base: 256B-aligned physical FB address where code is located
 *                 (falcon's $xcbase register)
 * @non_sec_code_off: offset from code_dma_base where the non-secure code is
 *                    located. The offset must be a multiple of 256 to help perf
 * @non_sec_code_size: the size of the non-secure code part
 * @sec_code_off: offset from code_dma_base where the secure code is
 *                located. The offset must be a multiple of 256 to help perf
 * @sec_code_size: the size of the secure code part
 * @code_entry_point: code entry point which will be invoked by BL after
 *                    code is loaded
 * @data_dma_base: 256B-aligned physical FB address where data is located
 *                 (falcon's $xdbase register)
 * @data_size: size of data block. Should be a multiple of 256B
 *
 * Structure used by the bootloader to load the rest of the code. This has
 * to be filled by host and copied into DMEM at offset provided in the
 * hsflcn_bl_desc.bl_desc_dmem_load_off.
 */
struct acr_r352_flcn_bl_desc {
	u32 reserved[4];
	u32 signature[4];
	u32 ctx_dma;
	u32 code_dma_base;
	u32 non_sec_code_off;
	u32 non_sec_code_size;
	u32 sec_code_off;
	u32 sec_code_size;
	u32 code_entry_point;
	u32 data_dma_base;
	u32 data_size;
};
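
/*
 * Note how code_dma_base and data_dma_base are 32-bit fields holding
 * 256B-aligned addresses: a value of 0x123456, for instance, refers to FB
 * physical address 0x123456 << 8 = 0x12345600. This is why the
 * generate_*_bl_desc() helpers below shift addresses right by 8 before
 * truncating them to 32 bits.
 */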

/**
 * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image
 */
static void
acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
			       const struct ls_ucode_img *img, u64 wpr_addr,
			       void *_desc)
{
	struct acr_r352_flcn_bl_desc *desc = _desc;
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	u64 base, addr_code, addr_data;

	base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset;
	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
	addr_data = (base + pdesc->app_resident_data_offset) >> 8;

	memset(desc, 0, sizeof(*desc));
	desc->ctx_dma = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = lower_32_bits(addr_code);
	desc->non_sec_code_off = pdesc->app_resident_code_offset;
	desc->non_sec_code_size = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = lower_32_bits(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
}

/**
 * struct hsflcn_acr_desc - data section of the HS firmware
 *
 * This header is to be copied at the beginning of DMEM by the HS bootloader.
 *
 * @signature: signature of ACR ucode
 * @wpr_region_id: region ID holding the WPR header and its details
 * @wpr_offset: offset from the WPR region holding the wpr header
 * @regions: region descriptors
 * @nonwpr_ucode_blob_size: size of the LS blob
 * @nonwpr_ucode_blob_start: FB location of the LS blob
 */
struct hsflcn_acr_desc {
	union {
		u8 reserved_dmem[0x200];
		u32 signatures[4];
	} ucode_reserved_space;
	u32 wpr_region_id;
	u32 wpr_offset;
	u32 mmu_mem_range;
#define FLCN_ACR_MAX_REGIONS 2
	struct {
		u32 no_regions;
		struct {
			u32 start_addr;
			u32 end_addr;
			u32 region_id;
			u32 read_mask;
			u32 write_mask;
			u32 client_mask;
		} region_props[FLCN_ACR_MAX_REGIONS];
	} regions;
	u32 ucode_blob_size;
	u64 ucode_blob_base __aligned(8);
};

/*
 * Low-secure blob creation
 */

typedef int (*lsf_load_func)(const struct nvkm_subdev *, struct ls_ucode_img *);

/**
 * ls_ucode_img_load() - create an ls_ucode_img and load it
 */
static struct ls_ucode_img *
ls_ucode_img_load(const struct nvkm_subdev *subdev, lsf_load_func load_func)
{
	struct ls_ucode_img *img;
	int ret;

	img = kzalloc(sizeof(*img), GFP_KERNEL);
	if (!img)
		return ERR_PTR(-ENOMEM);

	ret = load_func(subdev, img);
	if (ret) {
		kfree(img);
		return ERR_PTR(ret);
	}

	return img;
}

#define LSF_LSB_HEADER_ALIGN 256
#define LSF_BL_DATA_ALIGN 256
#define LSF_BL_DATA_SIZE_ALIGN 256
#define LSF_BL_CODE_SIZE_ALIGN 256
#define LSF_UCODE_DATA_ALIGN 4096
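
/*
 * Resulting WPR layout, as computed by ls_ucode_img_fill_headers() and
 * ls_ucode_mgr_fill_headers() below (offsets grow downwards):
 *
 *	+-------------------------------+ 0
 *	| WPR header array		|
 *	| (mgr->count + 1 entries)	|
 *	+-------------------------------+ aligned to LSF_LSB_HEADER_ALIGN
 *	| LSB header, falcon 0		|
 *	+-------------------------------+ aligned to LSF_UCODE_DATA_ALIGN
 *	| ucode image, falcon 0		|
 *	+-------------------------------+ aligned to LSF_BL_DATA_ALIGN
 *	| BL descriptor data, falcon 0	|
 *	+-------------------------------+
 *	| ... same layout per falcon	|
 *	+-------------------------------+
 */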

/**
 * ls_ucode_img_fill_headers - fill the WPR and LSB headers of an image
 * @acr: ACR to use
 * @img: image to generate for
 * @offset: offset in the WPR region where this image starts
 *
 * Allocate space in the WPR area from offset and write the WPR and LSB headers
 * accordingly.
 *
 * Return: offset at the end of this image.
 */
static u32
ls_ucode_img_fill_headers(struct acr_r352 *acr, struct ls_ucode_img *img,
			  u32 offset)
{
	struct lsf_wpr_header *whdr = &img->wpr_header;
	struct lsf_lsb_header *lhdr = &img->lsb_header;
	struct ls_ucode_img_desc *desc = &img->ucode_desc;
	const struct acr_r352_ls_func *func =
		acr->func->ls_func[img->falcon_id];

	if (img->ucode_header) {
		nvkm_fatal(acr->base.subdev,
			   "images without a loader are not supported yet!\n");
		return offset;
	}

	/* Fill WPR header */
	whdr->falcon_id = img->falcon_id;
	whdr->bootstrap_owner = acr->base.boot_falcon;
	whdr->status = LSF_IMAGE_STATUS_COPY;

	/* Align, save off, and include an LSB header size */
	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
	whdr->lsb_offset = offset;
	offset += sizeof(struct lsf_lsb_header);

	/*
	 * Align, save off, and include the original (static) ucode
	 * image size
	 */
	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
	lhdr->ucode_off = offset;
	offset += img->ucode_size;

	/*
	 * For falcons that use a boot loader (BL), we append a loader
	 * desc structure on the end of the ucode image and consider
	 * this the boot loader data. The host will then copy the loader
	 * desc args to this space within the WPR region (before locking
	 * down) and the HS bin will then copy them to DMEM 0 for the
	 * loader.
	 */
	lhdr->bl_code_size = ALIGN(desc->bootloader_size,
				   LSF_BL_CODE_SIZE_ALIGN);
	lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
				 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
	lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
			  lhdr->bl_code_size - lhdr->ucode_size;
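	/*
	 * In other words, ucode_size covers the (aligned) BL code plus
	 * everything up to the resident data, while data_size reduces to
	 * ALIGN(app_size) - ALIGN(app_resident_data_offset), i.e. the
	 * resident data itself.
	 */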
	/*
	 * Though the BL is located at 0th offset of the image, the VA
	 * is different to make sure that it doesn't collide with the
	 * actual OS VA range
	 */
	lhdr->bl_imem_off = desc->bootloader_imem_offset;
	lhdr->app_code_off = desc->app_start_offset +
			     desc->app_resident_code_offset;
	lhdr->app_code_size = desc->app_resident_code_size;
	lhdr->app_data_off = desc->app_start_offset +
			     desc->app_resident_data_offset;
	lhdr->app_data_size = desc->app_resident_data_size;

	lhdr->flags = func->lhdr_flags;
	if (img->falcon_id == acr->base.boot_falcon)
		lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;

	/* Align and save off BL descriptor size */
	lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);

	/*
	 * Align, save off, and include the additional BL data
	 */
	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
	lhdr->bl_data_off = offset;
	offset += lhdr->bl_data_size;

	return offset;
}

/**
 * struct ls_ucode_mgr - manager for all LS falcon firmwares
 * @count: number of managed LS falcons
 * @wpr_size: size of the required WPR region in bytes
 * @img_list: linked list of lsf_ucode_img
 */
struct ls_ucode_mgr {
	u16 count;
	u32 wpr_size;
	struct list_head img_list;
};

static void
ls_ucode_mgr_init(struct ls_ucode_mgr *mgr)
{
	memset(mgr, 0, sizeof(*mgr));
	INIT_LIST_HEAD(&mgr->img_list);
}

static void
ls_ucode_mgr_cleanup(struct ls_ucode_mgr *mgr)
{
	struct ls_ucode_img *img, *t;

	list_for_each_entry_safe(img, t, &mgr->img_list, node) {
		kfree(img->ucode_data);
		kfree(img->ucode_header);
		kfree(img);
	}
}

static void
ls_ucode_mgr_add_img(struct ls_ucode_mgr *mgr, struct ls_ucode_img *img)
{
	mgr->count++;
	list_add_tail(&img->node, &mgr->img_list);
}

/**
 * ls_ucode_mgr_fill_headers - fill WPR and LSB headers of all managed images
 */
static void
ls_ucode_mgr_fill_headers(struct acr_r352 *acr, struct ls_ucode_mgr *mgr)
{
	struct ls_ucode_img *img;
	u32 offset;

	/*
	 * Start with an array of WPR headers at the base of the WPR.
	 * The expectation here is that the secure falcon will do a single DMA
	 * read of this array and cache it internally so it's ok to pack these.
	 * Also, we add 1 to the falcon count to indicate the end of the array.
	 */
	offset = sizeof(struct lsf_wpr_header) * (mgr->count + 1);
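	/* e.g. with FECS and GPCCS managed, this reserves three header slots */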

	/*
	 * Walk the managed falcons, accounting for the LSB structs
	 * as well as the ucode images.
	 */
	list_for_each_entry(img, &mgr->img_list, node) {
		offset = ls_ucode_img_fill_headers(acr, img, offset);
	}

	mgr->wpr_size = offset;
}

/**
 * ls_ucode_mgr_write_wpr - write the WPR blob contents
 */
static int
ls_ucode_mgr_write_wpr(struct acr_r352 *acr, struct ls_ucode_mgr *mgr,
		       struct nvkm_gpuobj *wpr_blob, u32 wpr_addr)
{
	struct ls_ucode_img *img;
	u32 pos = 0;

	nvkm_kmap(wpr_blob);

	list_for_each_entry(img, &mgr->img_list, node) {
		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
				      sizeof(img->wpr_header));

		nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
				      &img->lsb_header, sizeof(img->lsb_header));

		/* Generate and write BL descriptor */
		if (!img->ucode_header) {
			const struct acr_r352_ls_func *ls_func =
				acr->func->ls_func[img->falcon_id];
			u8 gdesc[ls_func->bl_desc_size];

			ls_func->generate_bl_desc(&acr->base, img, wpr_addr,
						  gdesc);

			nvkm_gpuobj_memcpy_to(wpr_blob,
					      img->lsb_header.bl_data_off,
					      gdesc, ls_func->bl_desc_size);
		}

		/* Copy ucode */
		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
				      img->ucode_data, img->ucode_size);

		pos += sizeof(img->wpr_header);
	}

	nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);

	nvkm_done(wpr_blob);

	return 0;
}

/* Both size and address of WPR need to be 128K-aligned */
#define WPR_ALIGNMENT 0x20000

/**
 * acr_r352_prepare_ls_blob() - prepare the LS blob
 *
 * For each securely managed falcon, load the FW, signatures and bootloaders and
 * prepare a ucode blob. Then, compute the offsets in the WPR region for each
 * blob, and finally write the headers and ucode blobs into a GPU object that
 * will be copied into the WPR region by the HS firmware.
 */
static int
acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	struct ls_ucode_mgr mgr;
	unsigned long managed_falcons = acr->base.managed_falcons;
	int falcon_id;
	int ret;

	ls_ucode_mgr_init(&mgr);

	/* Load all LS blobs */
	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		struct ls_ucode_img *img;

		img = ls_ucode_img_load(subdev,
					acr->func->ls_func[falcon_id]->load);
		if (IS_ERR(img)) {
			ret = PTR_ERR(img);
			goto cleanup;
		}

		ls_ucode_mgr_add_img(&mgr, img);
	}

	/*
	 * Fill the WPR and LSF headers with the right offsets and compute
	 * overall size of the WPR blob
	 */
	ls_ucode_mgr_fill_headers(acr, &mgr);
	mgr.wpr_size = ALIGN(mgr.wpr_size, WPR_ALIGNMENT);

	/* Allocate GPU object that will contain the WPR region */
	ret = nvkm_gpuobj_new(subdev->device, mgr.wpr_size, WPR_ALIGNMENT,
			      false, NULL, &acr->ls_blob);
	if (ret)
		goto cleanup;

	nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
		   mgr.count, mgr.wpr_size);

	/* If WPR address and size are not fixed, set them to fit the LS blob */
	if (wpr_size == 0) {
		wpr_addr = acr->ls_blob->addr;
		wpr_size = mgr.wpr_size;
	/*
	 * But if the WPR region is set by the bootloader, it is illegal for
	 * the HS blob to be larger than this region.
	 */
	} else if (mgr.wpr_size > wpr_size) {
		nvkm_error(subdev, "WPR region too small for FW blob!\n");
		nvkm_error(subdev, "required: %dB\n", mgr.wpr_size);
		nvkm_error(subdev, "available: %dB\n", wpr_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Write LS blob */
	ret = ls_ucode_mgr_write_wpr(acr, &mgr, acr->ls_blob, wpr_addr);
	if (ret)
		nvkm_gpuobj_del(&acr->ls_blob);

cleanup:
	ls_ucode_mgr_cleanup(&mgr);

	return ret;
}

/*
 * High-secure blob creation
 */

/**
 * acr_r352_hsf_patch_signature() - patch HS blob with correct signature
 */
static void
acr_r352_hsf_patch_signature(struct nvkm_secboot *sb, void *acr_image)
{
	struct fw_bin_header *hsbin_hdr = acr_image;
	struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
	void *hs_data = acr_image + hsbin_hdr->data_offset;
	void *sig;
	u32 sig_size;

	/* Falcon in debug or production mode? */
	if (sb->boot_falcon->debug) {
		sig = acr_image + fw_hdr->sig_dbg_offset;
		sig_size = fw_hdr->sig_dbg_size;
	} else {
		sig = acr_image + fw_hdr->sig_prod_offset;
		sig_size = fw_hdr->sig_prod_size;
	}

	/* Patch signature */
	memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);
}

static void
acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
		       struct hsflcn_acr_desc *desc)
{
	struct nvkm_gpuobj *ls_blob = acr->ls_blob;

	desc->ucode_blob_base = ls_blob->addr;
	desc->ucode_blob_size = ls_blob->size;

	desc->wpr_offset = 0;

	/* WPR region information if WPR is not fixed */
	if (sb->wpr_size == 0) {
		desc->wpr_region_id = 1;
		desc->regions.no_regions = 1;
		desc->regions.region_props[0].region_id = 1;
		desc->regions.region_props[0].start_addr = ls_blob->addr >> 8;
		desc->regions.region_props[0].end_addr =
			(ls_blob->addr + ls_blob->size) >> 8;
	}
}

static void
acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
			     u64 offset)
{
	struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
	u64 addr_code, addr_data;

	memset(bl_desc, 0, sizeof(*bl_desc));
	addr_code = offset >> 8;
	addr_data = (offset + hdr->data_dma_base) >> 8;

	bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
	bl_desc->code_dma_base = lower_32_bits(addr_code);
	bl_desc->non_sec_code_off = hdr->non_sec_code_off;
	bl_desc->non_sec_code_size = hdr->non_sec_code_size;
	bl_desc->sec_code_off = hdr->app[0].sec_code_off;
	bl_desc->sec_code_size = hdr->app[0].sec_code_size;
	bl_desc->code_entry_point = 0;
	bl_desc->data_dma_base = lower_32_bits(addr_data);
	bl_desc->data_size = hdr->data_size;
}
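
/*
 * Note that, unlike the LS descriptors built by
 * acr_r352_generate_flcn_bl_desc(), the HS descriptor uses the VIRT DMA
 * context and a zero entry point: the HS blob is fetched from the GPU
 * virtual address passed as @offset, and its code starts at the very
 * beginning of the image.
 */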

/**
 * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor
 *
 * @acr: ACR instance
 * @sb: secure boot instance to prepare for
 * @fw: name of the HS firmware to load
 * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload
 * @load_header: load header to fill with the descriptor of the loaded firmware
 * @patch: whether we should patch the HS descriptor (only for HS loaders)
 */
static int
acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
			 const char *fw, struct nvkm_gpuobj **blob,
			 struct hsf_load_header *load_header, bool patch)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	void *acr_image;
	struct fw_bin_header *hsbin_hdr;
	struct hsf_fw_header *fw_hdr;
	struct hsf_load_header *load_hdr;
	void *acr_data;
	int ret;

	acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
	if (IS_ERR(acr_image))
		return PTR_ERR(acr_image);

	hsbin_hdr = acr_image;
	fw_hdr = acr_image + hsbin_hdr->header_offset;
	load_hdr = acr_image + fw_hdr->hdr_offset;
	acr_data = acr_image + hsbin_hdr->data_offset;

	/* Patch signature */
	acr_r352_hsf_patch_signature(sb, acr_image);

	/* Patch descriptor with WPR information? */
	if (patch) {
		struct hsflcn_acr_desc *desc;

		desc = acr_data + load_hdr->data_dma_base;
		acr_r352_fixup_hs_desc(acr, sb, desc);
	}

	if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
		nvkm_error(subdev, "more apps (%d) than supported (%d)!",
			   load_hdr->num_apps, ACR_R352_MAX_APPS);
		ret = -EINVAL;
		goto cleanup;
	}
	memcpy(load_header, load_hdr, sizeof(*load_header) +
	       (sizeof(load_hdr->app[0]) * load_hdr->num_apps));

	/* Create ACR blob and copy HS data to it */
	ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
			      0x1000, false, NULL, blob);
	if (ret)
		goto cleanup;

	nvkm_kmap(*blob);
	nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
	nvkm_done(*blob);

cleanup:
	kfree(acr_image);

	return ret;
}

static int
acr_r352_prepare_hsbl_blob(struct acr_r352 *acr)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	struct fw_bin_header *hdr;
	struct fw_bl_desc *hsbl_desc;

	acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
	if (IS_ERR(acr->hsbl_blob)) {
		int ret = PTR_ERR(acr->hsbl_blob);

		acr->hsbl_blob = NULL;
		return ret;
	}

	hdr = acr->hsbl_blob;
	hsbl_desc = acr->hsbl_blob + hdr->header_offset;

	/*
	 * Virtual start address for boot vector: start_tag is the index of
	 * a 256-byte IMEM page, hence the shift by 8.
	 */
	acr->base.start_address = hsbl_desc->start_tag << 8;

	return 0;
}

/**
 * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
 *
 * This includes the LS blob, HS ucode loading blob, and HS bootloader.
 *
 * The HS ucode unload blob is only used on dGPU if the WPR region is variable.
 */
static int
acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	int ret;

	/* Firmware already loaded? */
	if (acr->firmware_ok)
		return 0;

	/* Load and prepare the managed falcons' firmwares */
	ret = acr_r352_prepare_ls_blob(acr, sb->wpr_addr, sb->wpr_size);
	if (ret)
		return ret;

	/* Load the HS firmware that will load the LS firmwares */
	if (!acr->load_blob) {
		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
					       &acr->load_blob,
					       &acr->load_bl_header, true);
		if (ret)
			return ret;
	}

	/* If the ACR region is dynamically programmed, we need an unload FW */
	if (sb->wpr_size == 0) {
		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
					       &acr->unload_blob,
					       &acr->unload_bl_header, false);
		if (ret)
			return ret;
	}

	/* Load the HS firmware bootloader */
	if (!acr->hsbl_blob) {
		ret = acr_r352_prepare_hsbl_blob(acr);
		if (ret)
			return ret;
	}

	acr->firmware_ok = true;
	nvkm_debug(&sb->subdev, "LS blob successfully created\n");

	return 0;
}

/**
 * acr_r352_load() - prepare HS falcon to run the specified blob, mapped
 * at GPU address offset.
 */
static int
acr_r352_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
	      struct nvkm_gpuobj *blob, u64 offset)
{
	struct acr_r352 *acr = acr_r352(_acr);
	struct nvkm_falcon *falcon = sb->boot_falcon;
	struct fw_bin_header *hdr = acr->hsbl_blob;
	struct fw_bl_desc *hsbl_desc = acr->hsbl_blob + hdr->header_offset;
	void *blob_data = acr->hsbl_blob + hdr->data_offset;
	void *hsbl_code = blob_data + hsbl_desc->code_off;
	void *hsbl_data = blob_data + hsbl_desc->data_off;
	u32 code_size = ALIGN(hsbl_desc->code_size, 256);
	const struct hsf_load_header *load_hdr;
	const u32 bl_desc_size = acr->func->hs_bl_desc_size;
	u8 bl_desc[bl_desc_size];

	/* Find the bootloader descriptor for our blob and copy it */
	if (blob == acr->load_blob) {
		load_hdr = &acr->load_bl_header;
	} else if (blob == acr->unload_blob) {
		load_hdr = &acr->unload_bl_header;
	} else {
		nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
		return -EINVAL;
	}

	/*
	 * Copy HS bootloader data
	 */
	nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);

	/* Copy HS bootloader code to end of IMEM */
	nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
			      code_size, hsbl_desc->start_tag, 0, false);
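	/*
	 * Loading at falcon->code.limit - code_size places the bootloader in
	 * the last IMEM pages, so that the lower pages remain free for the HS
	 * image it will DMA in; start_tag ensures the pages are tagged with
	 * the bootloader's virtual address.
	 */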

	/* Generate the BL header */
	acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);

	/*
	 * Copy HS BL header where the HS descriptor expects it to be
	 */
	nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
			      bl_desc_size, 0);

	return 0;
}

/**
 * acr_r352_reset() - execute secure boot from the prepared state
 *
 * Load the HS bootloader and ask the falcon to run it. This will in turn
 * load the HS firmware and run it, so once the falcon stops all the managed
 * falcons should have their LS firmware loaded and be ready to run.
 */
static int
acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
	       enum nvkm_secboot_falcon falcon)
{
	struct acr_r352 *acr = acr_r352(_acr);
	int ret;

	/* Make sure all blobs are ready */
	ret = acr_r352_load_blobs(acr, sb);
	if (ret)
		return ret;

	/*
	 * Dummy GM200 implementation: perform secure boot each time we are
	 * called on FECS. Since only FECS and GPCCS are managed and started
	 * together, this ought to be safe.
	 *
	 * Once we have proper PMU firmware and support, this will be changed
	 * to a proper call to the PMU method.
	 */
	if (falcon != NVKM_SECBOOT_FALCON_FECS)
		return 0;

	/* If WPR is set and we have an unload blob, run it to unlock WPR */
	if (acr->unload_blob &&
	    acr->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE) {
		ret = sb->func->run_blob(sb, acr->unload_blob);
		if (ret)
			return ret;
	}

	/* Reload all managed falcons */
	ret = sb->func->run_blob(sb, acr->load_blob);
	if (ret)
		return ret;

	acr->falcon_state[falcon] = RESET;

	return 0;
}

static int
acr_r352_start(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
	       enum nvkm_secboot_falcon falcon)
{
	struct acr_r352 *acr = acr_r352(_acr);
	const struct nvkm_subdev *subdev = &sb->subdev;
	u32 base;

	switch (falcon) {
	case NVKM_SECBOOT_FALCON_FECS:
		base = 0x409000;
		break;
	case NVKM_SECBOOT_FALCON_GPCCS:
		base = 0x41a000;
		break;
	default:
		nvkm_error(subdev, "cannot start unhandled falcon!\n");
		return -EINVAL;
	}

	nvkm_wr32(subdev->device, base + 0x130, 0x00000002);
	acr->falcon_state[falcon] = RUNNING;

	return 0;
}

static int
acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
{
	struct acr_r352 *acr = acr_r352(_acr);
	int ret = 0;
	int i;

	/* Run the unload blob to unprotect the WPR region */
	if (acr->unload_blob &&
	    acr->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE)
		ret = sb->func->run_blob(sb, acr->unload_blob);

	for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
		acr->falcon_state[i] = NON_SECURE;

	return ret;
}

static void
acr_r352_dtor(struct nvkm_acr *_acr)
{
	struct acr_r352 *acr = acr_r352(_acr);

	nvkm_gpuobj_del(&acr->unload_blob);

	kfree(acr->hsbl_blob);
	nvkm_gpuobj_del(&acr->load_blob);
	nvkm_gpuobj_del(&acr->ls_blob);

	kfree(acr);
}

const struct acr_r352_ls_func
acr_r352_ls_fecs_func = {
	.load = acr_ls_ucode_load_fecs,
	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
};

const struct acr_r352_ls_func
acr_r352_ls_gpccs_func = {
	.load = acr_ls_ucode_load_gpccs,
	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
	/* GPCCS will be loaded using PRI */
	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};

const struct acr_r352_func
acr_r352_func = {
	.generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
	.hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
	.ls_func = {
		[NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
	},
};

static const struct nvkm_acr_func
acr_r352_base_func = {
	.dtor = acr_r352_dtor,
	.fini = acr_r352_fini,
	.load = acr_r352_load,
	.reset = acr_r352_reset,
	.start = acr_r352_start,
};

struct nvkm_acr *
acr_r352_new_(const struct acr_r352_func *func,
	      enum nvkm_secboot_falcon boot_falcon,
	      unsigned long managed_falcons)
{
	struct acr_r352 *acr;

	acr = kzalloc(sizeof(*acr), GFP_KERNEL);
	if (!acr)
		return ERR_PTR(-ENOMEM);

	acr->base.boot_falcon = boot_falcon;
	acr->base.managed_falcons = managed_falcons;
	acr->base.func = &acr_r352_base_func;
	acr->func = func;

	return &acr->base;
}

struct nvkm_acr *
acr_r352_new(unsigned long managed_falcons)
{
	return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
			     managed_falcons);
}
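
/*
 * Example (sketch): a GM200-era secboot implementation might instantiate
 * this ACR for the FECS and GPCCS falcons like so; the exact mask depends
 * on which falcons the chip securely manages:
 *
 *	struct nvkm_acr *acr =
 *		acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
 *			     BIT(NVKM_SECBOOT_FALCON_GPCCS));
 */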