/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) Sean Anderson <seanga2@gmail.com>
 */
#ifndef _SPL_LOAD_H_
#define _SPL_LOAD_H_

#include <image.h>
#include <imx_container.h>
#include <mapmem.h>
#include <spl.h>

static inline int _spl_load(struct spl_image_info *spl_image,
			    const struct spl_boot_device *bootdev,
			    struct spl_load_info *info, size_t size,
			    size_t offset)
{
	struct legacy_img_hdr *header =
		spl_get_load_buffer(-sizeof(*header), sizeof(*header));
	ulong base_offset, image_offset, overhead;
	int read, ret;

	read = info->read(info, offset, ALIGN(sizeof(*header),
					      spl_get_bl_len(info)), header);
	if (read < sizeof(*header))
		return -EIO;

	if (image_get_magic(header) == FDT_MAGIC) {
		if (IS_ENABLED(CONFIG_SPL_LOAD_FIT_FULL)) {
			void *buf;

			/*
			 * In order to support verifying images in the FIT, we
			 * need to load the whole FIT into memory. Try and
			 * guess how much we need to load by using the total
			 * size. This will fail for FITs with external data,
			 * but there's not much we can do about that.
			 */
			if (!size)
				size = round_up(fdt_totalsize(header), 4);
			buf = map_sysmem(CONFIG_SYS_LOAD_ADDR, size);
			read = info->read(info, offset,
					  ALIGN(size, spl_get_bl_len(info)),
					  buf);
			if (read < size)
				return -EIO;

			return spl_parse_image_header(spl_image, bootdev, buf);
		}

		if (IS_ENABLED(CONFIG_SPL_LOAD_FIT))
			return spl_load_simple_fit(spl_image, info, offset,
						   header);
	}

	if (IS_ENABLED(CONFIG_SPL_LOAD_IMX_CONTAINER) &&
	    valid_container_hdr((void *)header))
		return spl_load_imx_container(spl_image, info, offset);

	if (IS_ENABLED(CONFIG_SPL_LZMA) &&
	    image_get_magic(header) == IH_MAGIC &&
	    image_get_comp(header) == IH_COMP_LZMA) {
		spl_image->flags |= SPL_COPY_PAYLOAD_ONLY;
		ret = spl_parse_image_header(spl_image, bootdev, header);
		if (ret)
			return ret;

		return spl_load_legacy_lzma(spl_image, info, offset);
	}

	ret = spl_parse_image_header(spl_image, bootdev, header);
	if (ret)
		return ret;

	base_offset = spl_image->offset;
	/* Only NOR sets this flag. */
	if (IS_ENABLED(CONFIG_SPL_NOR_SUPPORT) &&
	    spl_image->flags & SPL_COPY_PAYLOAD_ONLY)
		base_offset += sizeof(*header);
	image_offset = ALIGN_DOWN(base_offset, spl_get_bl_len(info));
	overhead = base_offset - image_offset;
	size = ALIGN(spl_image->size + overhead, spl_get_bl_len(info));

	read = info->read(info, offset + image_offset, size,
			  map_sysmem(spl_image->load_addr - overhead, size));
	return read < spl_image->size ? -EIO : 0;
}

/*
 * Although spl_load results in size reduction for callers, this is generally
 * not enough to counteract the bloat if there is only one caller. The core
 * problem is that the compiler can't optimize across translation units. The
 * general solution to this is CONFIG_LTO, but that is not available on all
 * architectures. Perform a pseudo-LTO just for this function by declaring it
 * inline if there is one caller, and extern otherwise.
 */
#define SPL_LOAD_USERS \
	IS_ENABLED(CONFIG_SPL_BLK_FS) + \
	IS_ENABLED(CONFIG_SPL_FS_EXT4) + \
	IS_ENABLED(CONFIG_SPL_FS_FAT) + \
	IS_ENABLED(CONFIG_SPL_SYS_MMCSD_RAW_MODE) + \
	(IS_ENABLED(CONFIG_SPL_NAND_SUPPORT) && !IS_ENABLED(CONFIG_SPL_UBI)) + \
	IS_ENABLED(CONFIG_SPL_NET) + \
	IS_ENABLED(CONFIG_SPL_NOR_SUPPORT) + \
	0

#if SPL_LOAD_USERS > 1
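/*
 * In this multi-user case, spl_load() is only declared below; a single
 * out-of-line definition (a one-line wrapper around _spl_load(), mirroring
 * the static inline in the #else branch) must live in exactly one SPL
 * translation unit. A minimal sketch of that definition, assuming a dedicated
 * C file such as common/spl/spl_load.c (the file name is an assumption):
 *
 *	int spl_load(struct spl_image_info *spl_image,
 *		     const struct spl_boot_device *bootdev,
 *		     struct spl_load_info *info, size_t size, size_t offset)
 *	{
 *		return _spl_load(spl_image, bootdev, info, size, offset);
 *	}
 */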
/**
 * spl_load() - Parse a header and load the image
 * @spl_image: Image data which will be filled in by this function
 * @bootdev: The device to load from
 * @info: Describes how to load additional information from @bootdev. At a
 *        minimum, read() and bl_len must be populated.
 * @size: The size of the image, in bytes, if it is known in advance. Some boot
 *        devices (such as filesystems) know how big an image is before parsing
 *        the header. If 0, then the size will be determined from the header.
 * @offset: The offset from the start of @bootdev, in bytes. This should be the
 *          offset @header was loaded from. It will be added to any offsets
 *          passed to @info->read().
 *
 * This function determines the image type (FIT, legacy, i.MX, raw, etc), calls
 * the appropriate parsing function, determines the load address, and then
 * loads the image from storage. It is designed to replace ad-hoc image loading
 * which may not support all image types (especially when config options are
 * involved).
 *
 * Return: 0 on success, or a negative error on failure
 */
int spl_load(struct spl_image_info *spl_image,
	     const struct spl_boot_device *bootdev, struct spl_load_info *info,
	     size_t size, size_t offset);
#else
static inline int spl_load(struct spl_image_info *spl_image,
			   const struct spl_boot_device *bootdev,
			   struct spl_load_info *info, size_t size,
			   size_t offset)
{
	return _spl_load(spl_image, bootdev, info, size, offset);
}
#endif
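
/*
 * Usage sketch (illustrative only, not part of this header): a boot device
 * loader fills in a struct spl_load_info with its read() callback and block
 * length, then calls spl_load(). The names my_flash_read, MY_FLASH_BASE, and
 * MY_IMAGE_OFFSET below are hypothetical, a memory-mapped device is assumed
 * so that read() can be a plain memcpy() with a block length of 1, and
 * spl_set_bl_len() is assumed to be the setter counterpart of the
 * spl_get_bl_len() helper used above.
 *
 *	static ulong my_flash_read(struct spl_load_info *load, ulong offset,
 *				   ulong size, void *buf)
 *	{
 *		memcpy(buf, (void *)(MY_FLASH_BASE + offset), size);
 *		return size;
 *	}
 *
 *	static int spl_my_load_image(struct spl_image_info *spl_image,
 *				     struct spl_boot_device *bootdev)
 *	{
 *		struct spl_load_info load;
 *
 *		spl_set_bl_len(&load, 1);
 *		load.read = my_flash_read;
 *		return spl_load(spl_image, bootdev, &load, 0, MY_IMAGE_OFFSET);
 *	}
 */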

#endif /* _SPL_LOAD_H_ */