@@ -36,6 +36,9 @@
#define NAND_ECC_STATUS_1_3_CORRECTED BIT(4)
#define NAND_ECC_STATUS_7_8_CORRECTED (BIT(4) | BIT(3))
+/* Pages 0..14 of a block must be programmed before erasing it (shallow-erase mitigation) */
+#define MICRON_SHALLOW_ERASE_MIN_PAGE	15
+/* Bitmap of the page indexes tracked per eraseblock (fits the u16 writtenp entries) */
+#define MICRON_PAGE_MASK_TRIGGER	GENMASK(MICRON_SHALLOW_ERASE_MIN_PAGE, 0)
+
struct nand_onfi_vendor_micron {
u8 two_plane_read;
u8 read_cache;
@@ -64,6 +67,7 @@ struct micron_on_die_ecc {
 struct micron_nand {
 	struct micron_on_die_ecc ecc;
+	/*
+	 * Per-eraseblock bitmap of pages programmed since the last erase
+	 * (bits 0..15); allocated for SLC chips only — see micron_nand_init().
+	 */
+	u16 *writtenp;
 };
static int micron_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
@@ -429,6 +433,93 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
return MICRON_ON_DIE_SUPPORTED;
}
+/*
+ * Shallow-erase mitigation, run before erasing @eraseblock: Micron SLC chips
+ * may mis-erase a block whose first MICRON_SHALLOW_ERASE_MIN_PAGE pages were
+ * not all programmed. If the tracking bitmap shows they were not, pad those
+ * pages with zeros so the erase is "deep" enough.
+ *
+ * Returns 0 on success (including "nothing to do") or a negative errno.
+ */
+static int micron_nand_pre_erase(struct nand_chip *chip, u32 eraseblock)
+{
+	struct micron_nand *micron = nand_get_manufacturer_data(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	u8 last_page = MICRON_SHALLOW_ERASE_MIN_PAGE - 1;
+	u32 page;
+	u8 *data_buf;
+	int ret, i;
+
+	/* Fast path: the last tracked page was programmed, no padding needed. */
+	if (likely(micron->writtenp[eraseblock] & BIT(last_page)))
+		return 0;
+
+	data_buf = nand_get_data_buf(chip);
+	if (WARN_ON(!data_buf))
+		return -ENOMEM; /* was: WARN then NULL dereference below */
+
+	page = eraseblock << (chip->phys_erase_shift - chip->page_shift);
+
+	/*
+	 * No write recorded since boot: the bitmap may just be stale, so probe
+	 * the chip itself. If the last tracked page is NOT erased, the block
+	 * was sufficiently programmed before this boot and the erase is safe.
+	 * (Assumes nand_check_is_erased_page() returns non-zero for an erased
+	 * page — NOTE(review): confirm against its definition.)
+	 */
+	if (unlikely(micron->writtenp[eraseblock] == 0)) {
+		ret = nand_read_page_raw(chip, data_buf, 1, page + last_page);
+		if (ret)
+			return ret; /* Read error */
+		ret = nand_check_is_erased_page(chip, data_buf, true);
+		if (!ret)
+			return 0;
+	}
+
+	/* Pad pages 0..last_page with zeros before the actual erase. */
+	memset(data_buf, 0x00, mtd->writesize);
+
+	for (i = 0; i < MICRON_SHALLOW_ERASE_MIN_PAGE; i++) {
+		ret = nand_write_page_raw(chip, data_buf, false, page + i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* After a successful erase, forget every page recorded for @eraseblock. */
+static int micron_nand_post_erase(struct nand_chip *chip, u32 eraseblock)
+{
+	struct micron_nand *micron = nand_get_manufacturer_data(chip);
+
+	if (micron) {
+		/* The block is blank again: clear its written-pages bitmap. */
+		micron->writtenp[eraseblock] = 0;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Wrapper around the regular write-oob path that additionally records, in
+ * micron->writtenp[], which pages of which eraseblocks have been programmed,
+ * so micron_nand_pre_erase() knows whether shallow-erase padding is needed.
+ */
+static int micron_nand_write_oob(struct nand_chip *chip, loff_t to,
+				 struct mtd_oob_ops *ops)
+{
+	struct micron_nand *micron = nand_get_manufacturer_data(chip);
+	u32 eb_sz = nanddev_eraseblock_size(&chip->base);
+	u32 p_sz = nanddev_page_size(&chip->base);
+	u32 ppeb = nanddev_pages_per_eraseblock(&chip->base);
+	u32 nb_p_tot = ops->len / p_sz;
+	u32 first_eb = DIV_ROUND_DOWN_ULL(to, eb_sz);
+	u32 first_p = DIV_ROUND_UP_ULL(to - (first_eb * eb_sz), p_sz);
+	u32 nb_eb = DIV_ROUND_UP_ULL(first_p + nb_p_tot, ppeb);
+	u32 remaining_p, eb, nb_p;
+	int ret;
+
+	ret = nand_write_oob_nand(chip, to, ops);
+	if (ret || ops->len != ops->retlen)
+		return ret;
+
+	/*
+	 * Pure OOB writes program no page payload: nothing to track. Without
+	 * this guard nb_eb is 0 and, for first_eb == 0, the loop bound
+	 * "first_eb + nb_eb - 1" underflows to 0xFFFFFFFF.
+	 */
+	if (!nb_p_tot)
+		return 0;
+
+	/*
+	 * Mark the last pages of the first erase block to write. The highest
+	 * written page is first_p + nb_p - 1; marking one bit further (the
+	 * previous "first_p + nb_p") could falsely flag the trigger page as
+	 * written and defeat the pre-erase padding.
+	 */
+	nb_p = min(nb_p_tot, ppeb - first_p);
+	micron->writtenp[first_eb] |= GENMASK(first_p + nb_p - 1, 0) &
+				      MICRON_PAGE_MASK_TRIGGER;
+	remaining_p = nb_p_tot - nb_p;
+
+	/* Mark all the pages of all "in-the-middle" erase blocks */
+	for (eb = first_eb + 1; eb < first_eb + nb_eb - 1; eb++) {
+		micron->writtenp[eb] |= MICRON_PAGE_MASK_TRIGGER;
+		remaining_p -= ppeb;
+	}
+
+	/* Mark the first pages of the last erase block to write */
+	if (remaining_p)
+		micron->writtenp[eb] |= GENMASK(remaining_p - 1, 0) &
+					MICRON_PAGE_MASK_TRIGGER;
+
+	return 0;
+}
+
static int micron_nand_init(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -515,6 +606,17 @@ static int micron_nand_init(struct nand_chip *chip)
}
}
+	if (nand_is_slc(chip)) {
+		/* One 16-bit written-pages bitmap per eraseblock. */
+		micron->writtenp = kcalloc(nanddev_neraseblocks(&chip->base),
+					   sizeof(u16), GFP_KERNEL);
+		if (!micron->writtenp) {
+			/*
+			 * Was a bare goto: 'ret' would keep its previous
+			 * (success) value. NOTE(review): 'ret' is declared in
+			 * micron_nand_init() above this hunk — confirm name.
+			 */
+			ret = -ENOMEM;
+			goto err_free_manuf_data;
+		}
+
+		chip->ops.write_oob = micron_nand_write_oob;
+		chip->ops.pre_erase = micron_nand_pre_erase;
+		chip->ops.post_erase = micron_nand_post_erase;
+	}
+
return 0;
err_free_manuf_data: