@@ -45,10 +45,11 @@ static ulong spl_nand_fit_read(struct spl_load_info *load, ulong offs,
int err;
ulong sector;
- sector = *(int *)load->priv;
offs *= load->bl_len;
size *= load->bl_len;
+ sector = *(int *)load->priv;
offs = sector + nand_spl_adjust_offset(sector, offs - sector);
+
err = nand_spl_load_image(offs, size, dst);
if (err)
return 0;
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2014 Panasonic Corporation
* Copyright (C) 2013-2014, Altera Corporation <www.altera.com>
- * Copyright (C) 2009-2010, Intel Corporation and its suppliers.
+ * Copyright (C) 2009-2022, Intel Corporation and its suppliers.
*/
#include <common.h>
@@ -1374,2 +1374,97 @@ free_buf:
 	return ret;
 }
+
+#ifdef CONFIG_SPL_BUILD
+/* Return the current NAND chip's mtd_info; hang() if no device is probed. */
+struct mtd_info *nand_get_mtd(void)
+{
+	struct mtd_info *mtd;
+
+	mtd = get_nand_dev_by_index(nand_curr_device);
+	if (!mtd)
+		hang();
+
+	return mtd;
+}
+
+/*
+ * Load @len bytes from NAND @offset into @dst, skipping bad blocks.
+ * A non page-aligned @offset is handled by reading the first page into
+ * a bounce buffer and copying out the tail of that page.
+ */
+int nand_spl_load_image(u32 offset, u32 len, void *dst)
+{
+	size_t count = len, actual = 0, page_align_overhead = 0;
+	u32 page_align_offset = 0;
+	u8 *page_buffer;
+	int err = 0;
+	struct mtd_info *mtd;
+
+	if (!len || !dst)
+		return -EINVAL;
+
+	mtd = nand_get_mtd();
+
+	if ((offset & (mtd->writesize - 1)) != 0) {
+		page_buffer = malloc_cache_aligned(mtd->writesize);
+		if (!page_buffer) {
+			debug("Error: allocating buffer\n");
+			return -ENOMEM;
+		}
+		page_align_overhead = offset % mtd->writesize;
+		page_align_offset = (offset / mtd->writesize) * mtd->writesize;
+		count = mtd->writesize;
+		err = nand_read_skip_bad(mtd, page_align_offset, &count,
+					 &actual, mtd->size, page_buffer);
+		if (err) {
+			free(page_buffer);
+			return err;
+		}
+		count -= page_align_overhead;
+		count = min((size_t)len, count);
+		memcpy(dst, page_buffer + page_align_overhead, count);
+		free(page_buffer);
+		len -= count;
+		if (!len)
+			return err;
+		offset += count;
+		dst += count;
+		count = len;
+	}
+	return nand_read_skip_bad(mtd, offset, &count, &actual, mtd->size, dst);
+}
+
+/*
+ * This function adjusts the load offset to skip bad blocks.
+ * nand_spl_load_image() already skips bad blocks while reading, but the
+ * start offset itself must be corrected the same way.
+ * The offset at which the image to be loaded from NAND is located is
+ * retrieved from the itb header. The presence of bad blocks in the area
+ * of the NAND where the itb image is located could invalidate the offset
+ * which must therefore be adjusted taking into account the state of the
+ * sectors concerned.
+ */
+u32 nand_spl_adjust_offset(u32 sector, u32 offs)
+{
+	u32 sector_align_offset, sector_align_end_offset;
+	struct mtd_info *mtd;
+
+	mtd = nand_get_mtd();
+
+	sector_align_offset = sector & (~(mtd->erasesize - 1));
+
+	sector_align_end_offset = (sector + offs) & (~(mtd->erasesize - 1));
+
+	while (sector_align_offset <= sector_align_end_offset) {
+		if (nand_block_isbad(mtd, sector_align_offset)) {
+			offs += mtd->erasesize;
+			sector_align_end_offset += mtd->erasesize;
+		}
+		sector_align_offset += mtd->erasesize;
+	}
+
+	return offs;
+}
+
+void nand_deselect(void) {}
+#endif