
[RFC] UBI: Implement Fastmap support

Message ID 1337608916-49771-2-git-send-email-richard@nod.at
State RFC

Commit Message

Richard Weinberger May 21, 2012, 2:01 p.m. UTC
Fastmap (aka checkpointing) allows attaching of an UBI volume in nearly
constant time. Only a fixed number of PEBs has to be scanned.

Signed-off-by: Richard Weinberger <richard@nod.at>
---
 drivers/mtd/ubi/Makefile    |    2 +-
 drivers/mtd/ubi/attach.c    |   68 +++-
 drivers/mtd/ubi/build.c     |   23 +
 drivers/mtd/ubi/eba.c       |   18 +-
 drivers/mtd/ubi/fastmap.c   | 1132 +++++++++++++++++++++++++++++++++++++++++++
 drivers/mtd/ubi/ubi-media.h |  118 +++++
 drivers/mtd/ubi/ubi.h       |   61 +++-
 drivers/mtd/ubi/wl.c        |  170 +++++++-
 8 files changed, 1568 insertions(+), 24 deletions(-)
 create mode 100644 drivers/mtd/ubi/fastmap.c

Comments

Artem Bityutskiy May 22, 2012, 1:43 p.m. UTC | #1
Uhh, too many helpers.

On Mon, 2012-05-21 at 16:01 +0200, Richard Weinberger wrote:
>  /**
> - * ubi_attach - attach an MTD device.
> - * @ubi: UBI device descriptor
> + * scan_fastmap - attach MTD device using fastmap.
> + * @ubi: UBI device description object
>   *
> - * This function returns zero in case of success and a negative error code in
> - * case of failure.
> + * This function attaches a MTD device using a fastmap and returns complete
> + * information about it in form of a "struct ubi_attach_info" object. In case
> + * of failure, an error code is returned.
>   */
> -int ubi_attach(struct ubi_device *ubi)
> +static struct ubi_attach_info *scan_fastmap(struct ubi_device *ubi)
> +{
> +       int fm_start;
> +
> +       fm_start = ubi_find_fastmap(ubi);
> +       if (fm_start < 0)
> +               return ERR_PTR(-ENOENT);
> +
> +       return ubi_read_fastmap(ubi, fm_start);
> +}

This helper which does not do anything useful should not exist - just
teach 'ubi_read_fastmap()' to return 1 if the fastmap is not found, and
< 0 on error, and 0 on success. No one in attach.c should be interested
in fm_start. So this helper should die. See below as well.

> +
> +static int do_attach(struct ubi_device *ubi, bool fullscan)
>  {
>         int err;
>         struct ubi_attach_info *ai;
>  
> -       ai = scan_all(ubi);
> +       if (fullscan)
> +               ai = scan_all(ubi);
> +       else
> +               ai = scan_fastmap(ubi);
> +
>         if (IS_ERR(ai))
>                 return PTR_ERR(ai);
>  
> @@ -1256,6 +1277,31 @@ out_ai:
>  }

Another useless helper function, should die, see below.

>  
>  /**
> + * ubi_attach - attach an MTD device.
> + * @ubi: UBI device descriptor
> + *
> + * This function returns zero in case of success and a negative error code in
> + * case of failure.
> + */
> +int ubi_attach(struct ubi_device *ubi)
> +{
> +       int err;
> +
> +       err = do_attach(ubi, false);
> +       if (err) {
> +               if (err != -ENOENT)
> +                       ubi_err("Attach by fastmap failed! " \
> +                               "Falling back to attach by scanning. " \
> +                               "error = %i\n", err);
> +
> +               ubi->attached_by_scanning = true;
> +               err = do_attach(ubi, true);
> +       }
> +
> +       return err;
> +} 

Just add this code to this function:

       err = ubi_read_fastmap();
       if (err < 0)
               return err;
       if (err == 1)
               return scan_all();

       ... rest of the common initializations (EBA, WL)...

       return err;
Shmulik Ladkani May 22, 2012, 3:01 p.m. UTC | #2
Thanks Richard,

Just reviewed your patch, besides fastmap.c and ubi-media.h (will get to
them soon).
Some comments below.

On Mon, 21 May 2012 16:01:56 +0200 Richard Weinberger <richard@nod.at> wrote:
> Fastmap (aka checkpointing) allows attaching of an UBI volume in nearly
> constant time. Only a fixed number of PEBs has to be scanned.
> 
> Signed-off-by: Richard Weinberger <richard@nod.at>
> ---
>  drivers/mtd/ubi/Makefile    |    2 +-
>  drivers/mtd/ubi/attach.c    |   68 +++-
>  drivers/mtd/ubi/build.c     |   23 +
>  drivers/mtd/ubi/eba.c       |   18 +-
>  drivers/mtd/ubi/fastmap.c   | 1132 +++++++++++++++++++++++++++++++++++++++++++
>  drivers/mtd/ubi/ubi-media.h |  118 +++++
>  drivers/mtd/ubi/ubi.h       |   61 +++-
>  drivers/mtd/ubi/wl.c        |  170 +++++++-
>  8 files changed, 1568 insertions(+), 24 deletions(-)
>  create mode 100644 drivers/mtd/ubi/fastmap.c

The Kconfig changes are not present in this patch.

> @@ -977,7 +977,12 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
>  	}
>  
>  	vol_id = be32_to_cpu(vidh->vol_id);
> -	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
> +
> +	if (vol_id > UBI_MAX_VOLUMES &&
> +		vol_id != UBI_LAYOUT_VOLUME_ID &&
> +		((vol_id != UBI_FM_SB_VOLUME_ID &&
> +		vol_id != UBI_FM_DATA_VOLUME_ID) ||
> +		ubi->attached_by_scanning == true)) {

I think it would be easier to understand if 'attached_by_scanning' is
evaluated first:

+		(ubi->attached_by_scanning == true ||
+		(vol_id != UBI_FM_SB_VOLUME_ID &&
+		vol_id != UBI_FM_DATA_VOLUME_ID))) {

>  /**
> - * ubi_attach - attach an MTD device.
> - * @ubi: UBI device descriptor
> + * scan_fastmap - attach MTD device using fastmap.
> + * @ubi: UBI device description object
>   *
> - * This function returns zero in case of success and a negative error code in
> - * case of failure.
> + * This function attaches a MTD device using a fastmap and returns complete
> + * information about it in form of a "struct ubi_attach_info" object. In case
> + * of failure, an error code is returned.
>   */
> -int ubi_attach(struct ubi_device *ubi)
> +static struct ubi_attach_info *scan_fastmap(struct ubi_device *ubi)
> +{
> +	int fm_start;
> +
> +	fm_start = ubi_find_fastmap(ubi);
> +	if (fm_start < 0)
> +		return ERR_PTR(-ENOENT);
> +
> +	return ubi_read_fastmap(ubi, fm_start);
> +}

I think the fastmap should include the bad peb count.
(formerly, it was detected by scanning all pebs).
Otherwise UBI is unaware of the correct number of good/available
pebs (reflected in good_peb_count/avail_pebs).
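
A minimal sketch of what that could look like; the names below (ubi_fm_sb_example,
fmsb) are made up for illustration and are not taken from the patch:

	/* hypothetical on-flash field carrying the bad PEB count */
	struct ubi_fm_sb_example {
		__be32 magic;
		__be32 bad_peb_count;	/* bad PEBs known when the fastmap was written */
		/* remaining superblock fields */
	} __packed;

	/* at attach time, with fmsb being the superblock read from flash,
	 * seed the counter instead of detecting it by scanning every PEB */
	ai->bad_peb_count = be32_to_cpu(fmsb->bad_peb_count);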

> diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
> index 2c5ed5c..ef5d7b7 100644
> --- a/drivers/mtd/ubi/build.c
> +++ b/drivers/mtd/ubi/build.c
> @@ -144,6 +144,16 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
>  
>  	ubi_do_get_device_info(ubi, &nt.di);
>  	ubi_do_get_volume_info(ubi, vol, &nt.vi);
> +
> +	switch (ntype) {
> +	case UBI_VOLUME_ADDED:
> +	case UBI_VOLUME_REMOVED:
> +	case UBI_VOLUME_RESIZED:
> +	case UBI_VOLUME_RENAMED:
> +		if (ubi_update_fastmap(ubi))
> +			ubi_err("Unable to update fastmap!");

In the error case, what are the consequences leaving the older on-flash
fastmap? Shouldn't it be invalidated?

> +int ubi_update_fastmap(struct ubi_device *ubi)
> +{
> +	int ret, i;
> +	struct ubi_fastmap *new_fm;
> +
> +	if (ubi->ro_mode)
> +		return 0;
> +
> +	new_fm = kmalloc(sizeof(*new_fm), GFP_KERNEL);
> +	if (!new_fm)
> +		return -ENOMEM;
> +
> +	ubi->old_fm = ubi->fm;
> +	ubi->fm = NULL;
> +
> +	if (ubi->old_fm) {
> +		spin_lock(&ubi->wl_lock);
> +		new_fm->peb[0] = ubi_wl_get_fm_peb(ubi, UBI_FM_MAX_START);
> +		spin_unlock(&ubi->wl_lock);

Same 3 lines are found later, in the 'else' clause.
You can place these prior to the 'if (ubi->old_fm)'.
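
I.e. something along these lines (illustration only):

	/* take the anchor PEB once, before the branch, so both paths share it */
	spin_lock(&ubi->wl_lock);
	new_fm->peb[0] = ubi_wl_get_fm_peb(ubi, UBI_FM_MAX_START);
	spin_unlock(&ubi->wl_lock);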

> diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
> index 5275632..299a601 100644
> --- a/drivers/mtd/ubi/ubi.h
> +++ b/drivers/mtd/ubi/ubi.h
> @@ -202,6 +202,39 @@ struct ubi_rename_entry {
>  struct ubi_volume_desc;
>  
>  /**
> + * struct ubi_fastmap - in-memory fastmap data structure.
> + * @peb: PEBs used by the current fastamp

s/fastamp/fastmap/

> + * @ec: the erase counter of each used PEB
> + * @size: size of the fastmap in bytes
> + * @used_blocks: number of used PEBs
> + */
> +struct ubi_fastmap {

Suggestion: maybe ubi_fastmap_location / ubi_fastmap_layout /
ubi_fastmap_position ? slightly more explanatory than 'ubi_fastmap'.

> @@ -427,6 +467,12 @@ struct ubi_device {
>  	struct rb_root ltree;
>  	struct mutex alc_mutex;
>  
> +	/* Fastmap stuff */
> +	struct ubi_fastmap *fm;
> +	struct ubi_fastmap *old_fm;
> +	struct ubi_fm_pool fm_pool;
> +	bool attached_by_scanning;

Convention around MTD is 'int' for booleans.

> @@ -670,6 +717,9 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
>  int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
>  void ubi_wl_close(struct ubi_device *ubi);
>  int ubi_thread(void *u);
> +int ubi_wl_get_fm_peb(struct ubi_device *ubi, int max_pnum);
> +int ubi_wl_put_fm_peb(struct ubi_device *ubi, int pnum, int torture);
> +int ubi_is_fm_block(struct ubi_device *ubi, int pnum);

No users of 'ubi_is_fm_block' outside of wl.c (where it is defined).
Is it deliberately a part of the ubi interface?

> @@ -367,13 +386,70 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
>  }
>  
>  /**
> - * ubi_wl_get_peb - get a physical eraseblock.
> + * find_early_wl_entry - find wear-leveling entry with a low pnum.

s/a low/the lowest/

> + * @root: the RB-tree where to look for
> + * @max_pnum: highest possible pnum
> + *
> + * This function looks for a wear leveling entry containing a eb which
> + * is in front of the memory.

IMO can remove the last comment.

> +static struct ubi_wl_entry *find_early_wl_entry(struct rb_root *root,
> +						int max_pnum)
> +{
> +	struct rb_node *p;
> +	struct ubi_wl_entry *e, *victim = NULL;
> +
> +	ubi_rb_for_each_entry(p, e, root, u.rb) {
> +		if (e->pnum < max_pnum) {
> +			victim = e;
> +			max_pnum = e->pnum;
> +		}
> +	}
> +
> +	return victim;
> +}
> +
> +/**
> + * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
> + * @ubi: UBI device description object
> + * @max_pnum: the highest acceptable erase block number
> + *
> + * The function returns a physical erase block with a given maximal number
> + * and removes it from the wl subsystem.
> + * Must be called with wl_lock held!
> + */
> +int ubi_wl_get_fm_peb(struct ubi_device *ubi, int max_pnum)
> +{
> +	int ret = -ENOSPC;
> +	struct ubi_wl_entry *e;
> +
> +	if (!ubi->free.rb_node) {
> +		ubi_err("no free eraseblocks");
> +
> +		goto out;
> +	}
> +
> +	e = find_early_wl_entry(&ubi->free, max_pnum);

This picks the eb with the lowest pnum within 'ubi->free'.

When called with INT_MAX (for the FM_DATA), why do you need to pick a
free eb with the minimal pnum? The FM_DATA EBs may reside everywhere (as
the FM_SB holds their location).
So why not pick the eb with a medium EC value (as done for standard
get_peb calls)? That might be better wear-leveling wise.

> +/* ubi_wl_get_peb - works exaclty like __ubi_wl_get_peb but keeps track of
> + * the fastmap pool.
> + */
> +int ubi_wl_get_peb(struct ubi_device *ubi)
> +{
> +	struct ubi_fm_pool *pool = &ubi->fm_pool;
> +
> +	/* pool contains no free blocks, create a new one
> +	 * and write a fastmap */
> +	if (pool->used == pool->size || !pool->size) {
> +		for (pool->size = 0; pool->size < pool->max_size;
> +				pool->size++) {
> +			pool->pebs[pool->size] = __ubi_wl_get_peb(ubi);
> +			if (pool->pebs[pool->size] < 0)
> +				break;
> +		}
> +
> +		pool->used = 0;
> +		ubi_update_fastmap(ubi);
> +	}
> +
> +	/* we got not a single free PEB */
> +	if (!pool->size)
> +		return -1;

Returning -ENOSPC is more appropriate and consistent with former
ubi_wl_get_peb implementation.
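
A rough sketch of the corrected flow; the final return statement is an
assumption about the part of the function not quoted here:

	int ubi_wl_get_peb(struct ubi_device *ubi)
	{
		struct ubi_fm_pool *pool = &ubi->fm_pool;

		/* pool exhausted (or never filled): refill it and write a new fastmap */
		if (pool->used == pool->size || !pool->size) {
			for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
				pool->pebs[pool->size] = __ubi_wl_get_peb(ubi);
				if (pool->pebs[pool->size] < 0)
					break;
			}

			pool->used = 0;
			ubi_update_fastmap(ubi);
		}

		/* not a single free PEB was available */
		if (!pool->size)
			return -ENOSPC;

		return pool->pebs[pool->used++];
	}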

>  /**
> + * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
> + * sub-system.
> + *
> + * see: ubi_wl_put_peb()
> + */
> +int ubi_wl_put_fm_peb(struct ubi_device *ubi, int pnum, int torture)
> +{
> +	int i, err = 0;
> +	struct ubi_wl_entry *e;
> +
> +	dbg_wl("PEB %d", pnum);
> +	ubi_assert(pnum >= 0);
> +	ubi_assert(pnum < ubi->peb_count);
> +
> +	spin_lock(&ubi->wl_lock);
> +	e = ubi->lookuptbl[pnum];
> +
> +	/* This can happen if we recovered from a fastmap the very
> +	 * frist time and writing now a new one. In this case the wl system

s/frist/first/

> +	 * has never seen any PEB used by the original fastmap.
> +	 */
> +	if (!e) {
> +		ubi_assert(ubi->old_fm);
> +		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_ATOMIC);

Must it be GFP_ATOMIC?

> +		if (!e) {
> +			spin_unlock(&ubi->wl_lock);
> +			return -ENOMEM;

This is traumatic; you must return the PEB back to WL, but fail to do so
due to an internal error.
(BTW the return value of ubi_wl_put_fm_peb is not tested at the call
sites)
The system is left with a missing PEB.
Can't we guarantee ubi_wl_put_fm_peb is called only after wl_init is
completed?

Regards,
Shmulik
Richard Weinberger May 22, 2012, 4:55 p.m. UTC | #3
On 22.05.2012 17:01, Shmulik Ladkani wrote:
> Thanks Richard,
>
> Just reviewed your patch, besides fastmap.c and ubi-media.h (will get to
> them soon).
> Some comments below.

Thanks a lot!

> On Mon, 21 May 2012 16:01:56 +0200 Richard Weinberger<richard@nod.at>  wrote:
>> Fastmap (aka checkpointing) allows attaching of an UBI volume in nearly
>> constant time. Only a fixed number of PEBs has to be scanned.
>>
>> Signed-off-by: Richard Weinberger<richard@nod.at>
>> ---
>>   drivers/mtd/ubi/Makefile    |    2 +-
>>   drivers/mtd/ubi/attach.c    |   68 +++-
>>   drivers/mtd/ubi/build.c     |   23 +
>>   drivers/mtd/ubi/eba.c       |   18 +-
>>   drivers/mtd/ubi/fastmap.c   | 1132 +++++++++++++++++++++++++++++++++++++++++++
>>   drivers/mtd/ubi/ubi-media.h |  118 +++++
>>   drivers/mtd/ubi/ubi.h       |   61 +++-
>>   drivers/mtd/ubi/wl.c        |  170 +++++++-
>>   8 files changed, 1568 insertions(+), 24 deletions(-)
>>   create mode 100644 drivers/mtd/ubi/fastmap.c
>
> The Kconfig changes are not present in this patch.
>
>> @@ -977,7 +977,12 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
>>   	}
>>
>>   	vol_id = be32_to_cpu(vidh->vol_id);
>> -	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
>> +
>> +	if (vol_id > UBI_MAX_VOLUMES &&
>> +		vol_id != UBI_LAYOUT_VOLUME_ID &&
>> +		((vol_id != UBI_FM_SB_VOLUME_ID &&
>> +		vol_id != UBI_FM_DATA_VOLUME_ID) ||
>> +		ubi->attached_by_scanning == true)) {
>
> I think it would be easier to understand if 'attached_by_scanning' is
> evaluated first:
>
> +		(ubi->attached_by_scanning == true ||
> +		(vol_id != UBI_FM_SB_VOLUME_ID &&
> +		vol_id != UBI_FM_DATA_VOLUME_ID))) {

Yep, fixed!

>>   /**
>> - * ubi_attach - attach an MTD device.
>> - * @ubi: UBI device descriptor
>> + * scan_fastmap - attach MTD device using fastmap.
>> + * @ubi: UBI device description object
>>    *
>> - * This function returns zero in case of success and a negative error code in
>> - * case of failure.
>> + * This function attaches a MTD device using a fastmap and returns complete
>> + * information about it in form of a "struct ubi_attach_info" object. In case
>> + * of failure, an error code is returned.
>>    */
>> -int ubi_attach(struct ubi_device *ubi)
>> +static struct ubi_attach_info *scan_fastmap(struct ubi_device *ubi)
>> +{
>> +	int fm_start;
>> +
>> +	fm_start = ubi_find_fastmap(ubi);
>> +	if (fm_start < 0)
>> +		return ERR_PTR(-ENOENT);
>> +
>> +	return ubi_read_fastmap(ubi, fm_start);
>> +}
>
> I think the fastmap should include the bad peb count.
> (formerly, it was detected by scanning all pebs).
> Otherwise UBI is unaware of the correct number of good/available
> pebs (reflected in good_peb_count/avail_pebs).

Yeah, I'll add these counters.

>> diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
>> index 2c5ed5c..ef5d7b7 100644
>> --- a/drivers/mtd/ubi/build.c
>> +++ b/drivers/mtd/ubi/build.c
>> @@ -144,6 +144,16 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
>>
>>   	ubi_do_get_device_info(ubi,&nt.di);
>>   	ubi_do_get_volume_info(ubi, vol,&nt.vi);
>> +
>> +	switch (ntype) {
>> +	case UBI_VOLUME_ADDED:
>> +	case UBI_VOLUME_REMOVED:
>> +	case UBI_VOLUME_RESIZED:
>> +	case UBI_VOLUME_RENAMED:
>> +		if (ubi_update_fastmap(ubi))
>> +			ubi_err("Unable to update fastmap!");
>
> In the error case, what are the consequences leaving the older on-flash
> fastmap? Shouldn't it be invalidated?

If the fastmap is not written completely, the CRC check will fail next time
and we fall back to scanning mode.
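
For illustration, the kind of check meant here; 'fm_raw', 'fm_size' and 'fmsb'
are placeholder names, not the patch's actual identifiers:

	crc = crc32(UBI_CRC32_INIT, fm_raw, fm_size);
	if (crc != be32_to_cpu(fmsb->data_crc)) {
		ubi_err("fastmap CRC mismatch, falling back to scanning");
		return ERR_PTR(-EINVAL);	/* ubi_attach() then does a full scan */
	}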

>> +int ubi_update_fastmap(struct ubi_device *ubi)
>> +{
>> +	int ret, i;
>> +	struct ubi_fastmap *new_fm;
>> +
>> +	if (ubi->ro_mode)
>> +		return 0;
>> +
>> +	new_fm = kmalloc(sizeof(*new_fm), GFP_KERNEL);
>> +	if (!new_fm)
>> +		return -ENOMEM;
>> +
>> +	ubi->old_fm = ubi->fm;
>> +	ubi->fm = NULL;
>> +
>> +	if (ubi->old_fm) {
>> +		spin_lock(&ubi->wl_lock);
>> +		new_fm->peb[0] = ubi_wl_get_fm_peb(ubi, UBI_FM_MAX_START);
>> +		spin_unlock(&ubi->wl_lock);
>
> Same 3 lines are found later, in the 'else' clause.
> You can place these prior to the 'if (ubi->old_fm)'.

Fixed.

>> diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
>> index 5275632..299a601 100644
>> --- a/drivers/mtd/ubi/ubi.h
>> +++ b/drivers/mtd/ubi/ubi.h
>> @@ -202,6 +202,39 @@ struct ubi_rename_entry {
>>   struct ubi_volume_desc;
>>
>>   /**
>> + * struct ubi_fastmap - in-memory fastmap data structure.
>> + * @peb: PEBs used by the current fastamp
>
> s/fastamp/fastmap/
>
>> + * @ec: the erase counter of each used PEB
>> + * @size: size of the fastmap in bytes
>> + * @used_blocks: number of used PEBs
>> + */
>> +struct ubi_fastmap {
>
> Suggestion: maybe ubi_fastmap_location / ubi_fastmap_layout /
> ubi_fastmap_position ? slightly more explanatory than 'ubi_fastmap'.

Good idea. ubi_fastmap_position seems good to me.

>> @@ -427,6 +467,12 @@ struct ubi_device {
>>   	struct rb_root ltree;
>>   	struct mutex alc_mutex;
>>
>> +	/* Fastmap stuff */
>> +	struct ubi_fastmap *fm;
>> +	struct ubi_fastmap *old_fm;
>> +	struct ubi_fm_pool fm_pool;
>> +	bool attached_by_scanning;
>
> Convention around MTD is 'int' for booleans.

Fixed.

>> @@ -670,6 +717,9 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
>>   int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
>>   void ubi_wl_close(struct ubi_device *ubi);
>>   int ubi_thread(void *u);
>> +int ubi_wl_get_fm_peb(struct ubi_device *ubi, int max_pnum);
>> +int ubi_wl_put_fm_peb(struct ubi_device *ubi, int pnum, int torture);
>> +int ubi_is_fm_block(struct ubi_device *ubi, int pnum);
>
> No users of 'ubi_is_fm_block' outside of wl.c (where it is defined).
> Is it deliberately a part of the ubi interface?

True. Fixed.

>> @@ -367,13 +386,70 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
>>   }
>>
>>   /**
>> - * ubi_wl_get_peb - get a physical eraseblock.
>> + * find_early_wl_entry - find wear-leveling entry with a low pnum.
>
> s/a low/the lowest/

Fixed.

>> + * @root: the RB-tree where to look for
>> + * @max_pnum: highest possible pnum
>> + *
>> + * This function looks for a wear leveling entry containing a eb which
>> + * is in front of the memory.
>
> IMO can remove the last comment.

Documentation is like sex: when it is good, it is very, very good; and 
when it is bad, it is better than nothing.
- Dick Brandon

>> +static struct ubi_wl_entry *find_early_wl_entry(struct rb_root *root,
>> +						int max_pnum)
>> +{
>> +	struct rb_node *p;
>> +	struct ubi_wl_entry *e, *victim = NULL;
>> +
>> +	ubi_rb_for_each_entry(p, e, root, u.rb) {
>> +		if (e->pnum < max_pnum) {
>> +			victim = e;
>> +			max_pnum = e->pnum;
>> +		}
>> +	}
>> +
>> +	return victim;
>> +}
>> +
>> +/**
>> + * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
>> + * @ubi: UBI device description object
>> + * @max_pnum: the highest acceptable erase block number
>> + *
>> + * The function returns a physical erase block with a given maximal number
>> + * and removes it from the wl subsystem.
>> + * Must be called with wl_lock held!
>> + */
>> +int ubi_wl_get_fm_peb(struct ubi_device *ubi, int max_pnum)
>> +{
>> +	int ret = -ENOSPC;
>> +	struct ubi_wl_entry *e;
>> +
>> +	if (!ubi->free.rb_node) {
>> +		ubi_err("no free eraseblocks");
>> +
>> +		goto out;
>> +	}
>> +
>> +	e = find_early_wl_entry(&ubi->free, max_pnum);
>
> This picks the eb with the lowest pnum within 'ubi->free'.
>
> When called with INT_MAX (for the FM_DATA), why do you need to pick a
> free eb with the minimal pnum? The FM_DATA EBs may reside everywhere (as
> the FM_SB holds their location).
> So why not pick the eb with a medium EC value (as done for standard
> get_peb calls)? That might be better wear-leveling wise.

Fair point.
I'll fix that.
Artem, any comments on that?

>> +/* ubi_wl_get_peb - works exaclty like __ubi_wl_get_peb but keeps track of
>> + * the fastmap pool.
>> + */
>> +int ubi_wl_get_peb(struct ubi_device *ubi)
>> +{
>> +	struct ubi_fm_pool *pool = &ubi->fm_pool;
>> +
>> +	/* pool contains no free blocks, create a new one
>> +	 * and write a fastmap */
>> +	if (pool->used == pool->size || !pool->size) {
>> +		for (pool->size = 0; pool->size < pool->max_size;
>> +				pool->size++) {
>> +			pool->pebs[pool->size] = __ubi_wl_get_peb(ubi);
>> +			if (pool->pebs[pool->size] < 0)
>> +				break;
>> +		}
>> +
>> +		pool->used = 0;
>> +		ubi_update_fastmap(ubi);
>> +	}
>> +
>> +	/* we got not a single free PEB */
>> +	if (!pool->size)
>> +		return -1;
>
> Returning -ENOSPC is more appropriate and consistent with former
> ubi_wl_get_peb implementation.

Fixed.

>>   /**
>> + * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
>> + * sub-system.
>> + *
>> + * see: ubi_wl_put_peb()
>> + */
>> +int ubi_wl_put_fm_peb(struct ubi_device *ubi, int pnum, int torture)
>> +{
>> +	int i, err = 0;
>> +	struct ubi_wl_entry *e;
>> +
>> +	dbg_wl("PEB %d", pnum);
>> +	ubi_assert(pnum >= 0);
>> +	ubi_assert(pnum < ubi->peb_count);
>> +
>> +	spin_lock(&ubi->wl_lock);
>> +	e = ubi->lookuptbl[pnum];
>> +
>> +	/* This can happen if we recovered from a fastmap the very
>> +	 * frist time and writing now a new one. In this case the wl system
>
> s/frist/first/

Fixed.

>> +	 * has never seen any PEB used by the original fastmap.
>> +	 */
>> +	if (!e) {
>> +		ubi_assert(ubi->old_fm);
>> +		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_ATOMIC);
>
> Must it be GFP_ATOMIC?

Yes. This function is called under a spinlock.

>> +		if (!e) {
>> +			spin_unlock(&ubi->wl_lock);
>> +			return -ENOMEM;
>
> This is traumatic; you must return the PEB back to WL, but fail to do so
> due to an internal error.

This is not easy. In the !e case I need memory.

> (BTW the return value of ubi_wl_put_fm_peb is not tested at the call
> sites)
> The system is left with a missing PEB.
> Can't we guarantee ubi_wl_put_fm_peb is called only after wl_init is
> completed?

Why would that help?

Thanks,
//richard
Shmulik Ladkani May 22, 2012, 6:18 p.m. UTC | #4
Hi Richard,

On Tue, 22 May 2012 18:55:10 +0200 Richard Weinberger <richard@nod.at> wrote:
> >> diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
> >> index 2c5ed5c..ef5d7b7 100644
> >> --- a/drivers/mtd/ubi/build.c
> >> +++ b/drivers/mtd/ubi/build.c
> >> @@ -144,6 +144,16 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
> >>
> >>   	ubi_do_get_device_info(ubi,&nt.di);
> >>   	ubi_do_get_volume_info(ubi, vol,&nt.vi);
> >> +
> >> +	switch (ntype) {
> >> +	case UBI_VOLUME_ADDED:
> >> +	case UBI_VOLUME_REMOVED:
> >> +	case UBI_VOLUME_RESIZED:
> >> +	case UBI_VOLUME_RENAMED:
> >> +		if (ubi_update_fastmap(ubi))
> >> +			ubi_err("Unable to update fastmap!");
> >
> > In the error case, what are the consequences leaving the older on-flash
> > fastmap? Shouldn't it be invalidated?
> 
> If the fastmap is not written completely, the CRC check will fail next time
> and we fall back to scanning mode.

There are many fail-points within 'ubi_update_fastmap' where an error
code is returned, long before it attempts writing to the media.
If 'ubi_update_fastmap' fails due to those, then the old on-flash
fastmap is still valid.
However we know a volume has been just changed.
Will the system be coherent after a sudden reboot (that occurs prior
next successful fastmap update)?

> >> diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
> >> index 5275632..299a601 100644
> >> --- a/drivers/mtd/ubi/ubi.h
> >> +++ b/drivers/mtd/ubi/ubi.h
> >> @@ -202,6 +202,39 @@ struct ubi_rename_entry {
> >>   struct ubi_volume_desc;
> >>
> >>   /**
> >> + * struct ubi_fastmap - in-memory fastmap data structure.
> >> + * @peb: PEBs used by the current fastamp
> >
> > s/fastamp/fastmap/
> >
> >> + * @ec: the erase counter of each used PEB
> >> + * @size: size of the fastmap in bytes
> >> + * @used_blocks: number of used PEBs
> >> + */
> >> +struct ubi_fastmap {
> >
> > Suggestion: maybe ubi_fastmap_location / ubi_fastmap_layout /
> > ubi_fastmap_position ? slightly more explanatory than 'ubi_fastmap'.
> 
> Good idea. ubi_fastmap_position seems good to me.

Sounds good. You could amend the comments above the struct as well.

> >> + * @root: the RB-tree where to look for
> >> + * @max_pnum: highest possible pnum
> >> + *
> >> + * This function looks for a wear leveling entry containing a eb which
> >> + * is in front of the memory.
> >
> > IMO can remove the last comment.
> 
> Documentation is like sex: when it is good, it is very, very good; and 
> when it is bad, it is better than nothing.
> - Dick Brandon

:-)

BTW what is "in front of the memory"? ;)

> >> +		if (!e) {
> >> +			spin_unlock(&ubi->wl_lock);
> >> +			return -ENOMEM;
> >
> > This is traumatic; you must return the PEB back to WL, but fail to do so
> > due to an internal error.
> 
> This is not easy. In the !e case I need memory.

Yes, I noticed this is far from trivial ;)
What troubles me, is that the system is unaware of the fault, acts
business as usual, with one less PEB.

Care to elaborate how come 'ubi->lookuptbl[pnum]' is NULL? What's the
exact flow leading to it?
 
> > (BTW the return value of ubi_wl_put_fm_peb is not tested at the call
> > sites)
> > The system is left with a missing PEB.
> > Can't we guarantee ubi_wl_put_fm_peb is called only after wl_init is
> > completed?
> 
> Why would that help?

I tried to assess why 'ubi->lookuptbl[pnum]' gets NULL; I was
probably mistaken in my analysis.

There's a bit of asymmetry here.

'ubi_wl_get_fm_peb' doesn't clear the 'ubi_wl_entry' from the lookuptbl.
It does not free the 'ubi_wl_entry' either (seems like it should, no?)

However 'ubi_wl_put_fm_peb' creates a 'ubi_wl_entry' if not found in
the lookuptbl.

Formerly, wl_init was responsible for correctly populating
'ubi->lookuptbl'. Can we somehow preserve this for FM pebs as well?

Regards,
Shmulik
Richard Weinberger May 22, 2012, 6:57 p.m. UTC | #5
Hi Shmulik,

On Tue, 22 May 2012 21:18:09 +0300, Shmulik Ladkani
<shmulik.ladkani@gmail.com> wrote:
>> If the fastmap is not written completely, the CRC check will fail next time
>> and we fall back to scanning mode.
> 
> There are many fail-points within 'ubi_update_fastmap' where an error
> code is returned, long before it attempts writing to the media.
> If 'ubi_update_fastmap' fails due to those, then the old on-flash
> fastmap is still valid.
> However we know a volume has been just changed.
> Will the system be coherent after a sudden reboot (that occurs prior
> next successful fastmap update)?

In this case fastmap will detect that the pool contains LEBs with an
unknown volume ID.
But maybe it would be better to kill the current fastmap as the very
first step, to avoid any unknown corner cases.
I have to think about this. :-)

>> >> diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
>> >> index 5275632..299a601 100644
>> >> --- a/drivers/mtd/ubi/ubi.h
>> >> +++ b/drivers/mtd/ubi/ubi.h
>> >> @@ -202,6 +202,39 @@ struct ubi_rename_entry {
>> >>   struct ubi_volume_desc;
>> >>
>> >>   /**
>> >> + * struct ubi_fastmap - in-memory fastmap data structure.
>> >> + * @peb: PEBs used by the current fastamp
>> >
>> > s/fastamp/fastmap/
>> >
>> >> + * @ec: the erase counter of each used PEB
>> >> + * @size: size of the fastmap in bytes
>> >> + * @used_blocks: number of used PEBs
>> >> + */
>> >> +struct ubi_fastmap {
>> >
>> > Suggestion: maybe ubi_fastmap_location / ubi_fastmap_layout /
>> > ubi_fastmap_position ? slightly more explanatory than 'ubi_fastmap'.
>>
>> Good idea. ubi_fastmap_position seems good to me.
> 
> Sounds good. You could amend the comments above the struct as well.
> 
>> >> + * @root: the RB-tree where to look for
>> >> + * @max_pnum: highest possible pnum
>> >> + *
>> >> + * This function looks for a wear leveling entry containing a eb which
>> >> + * is in front of the memory.
>> >
>> > IMO can remove the last comment.
>>
>> Documentation is like sex: when it is good, it is very, very good; and
>> when it is bad, it is better than nothing.
>> - Dick Brandon
> 
> :-)
> 
> BTW what is "in front of the memory"? ;)

I'll rephrase the comment. :D
I meant "in front of the NAND memory" IOW a PEB with a low number.
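
Roughly, because the attach path only examines the first UBI_FM_MAX_START PEBs
when it looks for the fastmap superblock; a sketch (not the patch's actual
ubi_find_fastmap(), vidh being a pre-allocated VID header buffer):

	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
		if (!err && be32_to_cpu(vidh->vol_id) == UBI_FM_SB_VOLUME_ID)
			return pnum;	/* anchor PEB holding the fastmap superblock */
	}

	return -ENOENT;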

>> >> +		if (!e) {
>> >> +			spin_unlock(&ubi->wl_lock);
>> >> +			return -ENOMEM;
>> >
>> > This is traumatic; you must return the PEB back to WL, but fail to do so
>> > due to an internal error.
>>
>> This is not easy. In the !e case I need memory.
> 
> Yes, I noticed this is far from trivial ;)
> What troubles me, is that the system is unaware of the fault, acts
> business as usual, with one less PEB.
> 
> Care to elaborate how come 'ubi->lookuptbl[pnum]' is NULL? What's the
> exact flow leading to it?

PEBs used by the fastmap sub-system are not known by the WL and EBA
sub-system.
A PEB used by fastmap contains mostly raw data. (It does not have any
LEBs).
E.g. if PEBs 0, 1 and 2 are used by fastmap, they are not in
ubi->lookuptbl.
So, right after attaching from a fastmap, ubi->lookuptbl[0|1|2] is NULL.
By writing a new fastmap, PEBs 0, 1 and 2 will be put back to the WL
sub-system and will then appear in ubi->lookuptbl.

>> > (BTW the return value of ubi_wl_put_fm_peb is not tested at the call
>> > sites)
>> > The system is left with a missing PEB.
>> > Can't we guarantee ubi_wl_put_fm_peb is called only after wl_init is
>> > completed?
>>
>> Why would that help?
> 
> I tried to assess why 'ubi->lookuptbl[pnum]' gets NULL; I was
> probably mistaken in my analysis.
> 
> There's a bit of asymmetry here.

Keeping the fastmap PEBs away from the WL sub-system is tricky, yes.

> 'ubi_wl_get_fm_peb' doesn't clear the 'ubi_wl_entry' from the lookuptbl.
> It does not free the 'ubi_wl_entry' either (seems like it should, no?)

It removes a PEB from the free rb-tree.
ubi->lookuptbl[pnum] does not matter at this time.

> However 'ubi_wl_put_fm_peb' creates a 'ubi_wl_entry' if not found in
> the lookuptbl.

Yeah, but only in one corner case.
See my comment:
        /* This can happen if we recovered from a fastmap the very
         * frist time and writing now a new one. In this case the wl
system
         * has never seen any PEB used by the original fastmap.
         */

> Formerly, wl_init was responsible for correctly populating
> 'ubi->lookuptbl'. Can we somehow preserve this for FM pebs as well?
> 

Currently fastmap "fixes" ubi->lookuptbl on demand. Is this a problem?

Thanks,
//richard
Richard Weinberger May 22, 2012, 8:11 p.m. UTC | #6
On Tue, 22 May 2012 21:18:09 +0300, Shmulik Ladkani
<shmulik.ladkani@gmail.com> wrote:
> Hi Richard,
> 
> On Tue, 22 May 2012 18:55:10 +0200 Richard Weinberger <richard@nod.at> wrote:
>> >> diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
>> >> index 2c5ed5c..ef5d7b7 100644
>> >> --- a/drivers/mtd/ubi/build.c
>> >> +++ b/drivers/mtd/ubi/build.c
>> >> @@ -144,6 +144,16 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
>> >>
>> >>   	ubi_do_get_device_info(ubi,&nt.di);
>> >>   	ubi_do_get_volume_info(ubi, vol,&nt.vi);
>> >> +
>> >> +	switch (ntype) {
>> >> +	case UBI_VOLUME_ADDED:
>> >> +	case UBI_VOLUME_REMOVED:
>> >> +	case UBI_VOLUME_RESIZED:
>> >> +	case UBI_VOLUME_RENAMED:
>> >> +		if (ubi_update_fastmap(ubi))
>> >> +			ubi_err("Unable to update fastmap!");
>> >
>> > In the error case, what are the consequences leaving the older on-flash
>> > fastmap? Shouldn't it be invalidated?
>>

After thinking a bit more about this case I think the best we can do is
switching to read-only mode if ubi_update_fastmap() fails.

Thanks,
//richard
Shmulik Ladkani May 23, 2012, 6:18 a.m. UTC | #7
Hi Richard,

On Tue, 22 May 2012 20:57:25 +0200 Richard Weinberger <richard@nod.at> wrote:
> > Care to elaborate how come 'ubi->lookuptbl[pnum]' is NULL? What's the
> > exact flow leading to it?
> 
> PEBs used by the fastmap sub-system are not known by the WL and EBA
> sub-system.
> A PEB used by fastmap contains mostly raw data. (It does not have any
> LEBs).
> E.g. If PEBs 0,1 and 2 are used by fastmap they are not in
> ubi->lookuptbl.
> So, right after attaching from a fastmap ubi->lookuptbl[0|1|2] is NULL.
> By writing a new fastmap 0, 1 and 2 will be put back to the WL
> sub-system and they appear in
> ubi->lookuptbl.

Thanks. Understood.

> > 'ubi_wl_get_fm_peb' doesn't clear the 'ubi_wl_entry' from the lookuptbl.
> > It does not free the 'ubi_wl_entry' either (seems like it should, no?)
> 
> It removes a PEB from the free rb-tree.
> ubi->lookuptbl[pnum] does not matter at this time.

For practical matters, you are correct.
Design-wise, it is a bit inconsistent and confusing.

On one hand, you claim above that 'ubi->lookuptbl' holds the WL entries
known to WL subsystem - that is, lookuptbl is a data structure used and
maintained by the WL subsystem.
OTOH, when a PEB is removed from hands of WL (by a ubi_wl_get_fm_peb
call), you keep its WL entry assigned in ubi->lookuptbl.

I'll rethink this, see if there's a potential trouble here.

> > However 'ubi_wl_put_fm_peb' creates a 'ubi_wl_entry' if not found in
> > the lookuptbl.
> 
> Yeah, but only in one corner case.
> See my comment:
>         /* This can happen if we recovered from a fastmap the very
>          * frist time and writing now a new one. In this case the wl
> system
>          * has never seen any PEB used by the original fastmap.
>          */
> 
> > Formerly, wl_init was responsible for correctly populating
> > 'ubi->lookuptbl'. Can we somehow preserve this for FM pebs as well?
> > 
> 
> Currently fastmap "fixes" ubi->lookuptbl on demand. Is this a problem?

I guess not.

The only problem, as previously noted, is the failure to create a new
'ubi_wl_entry' when the PEB needs to be returned to WL subsystem.

If all relevant wl entries are created during 'wl_init' (including those
currently associated to the fastmap), then we are fine, as internal
failures within 'wl_init' propagate back to the attach code.

I'll give it some more thought.

Regards,
Shmulik
Richard Weinberger May 23, 2012, 7:43 a.m. UTC | #8
On 23.05.2012 08:18, Shmulik Ladkani wrote:
>> It removes a PEB from the free rb-tree.
>> ubi->lookuptbl[pnum] does not matter at this time.
>
> For practical matters, you are correct.
> Design wise, it is bit inconsistent and confusing.
>
> On one hand, you claim above that 'ubi->lookuptbl' holds the WL entries
> known to WL subsystem - that is, lookuptbl is a data structure used and
> maintained by the WL subsystem.
> OTOH, when a PEB is removed from hands of WL (by a ubi_wl_get_fm_peb
> call), you keep its WL entry assigned in ubi->lookuptbl.
>
> I'll rethink this, see if there's a potential trouble here.

If I remove the PEB from ubi->lookuptbl I'll have to reread the EC 
header upon ubi_wl_put_fm_peb().
Otherwise its EC value is lost.

>>> However 'ubi_wl_put_fm_peb' creates a 'ubi_wl_entry' if not found in
>>> the lookuptbl.
>> Currently fastmap "fixes" ubi->lookuptbl on demand. Is this a problem?
>
> I guess not.
>
> The only problem, as previously noted, is the failure to create a new
> 'ubi_wl_entry' when the PEB needs to be returned to WL subsystem.

I think I can return them to the lookuptbl while attaching.
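
One possible shape of that, sketched with the fields of the posted struct
ubi_fastmap (treat this as an assumption, not the actual v7 code):

	/* in ubi_wl_init(): also register the PEBs occupied by the fastmap itself,
	 * so ubi->lookuptbl is complete and ubi_wl_put_fm_peb() never has to
	 * allocate under wl_lock */
	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++) {
			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = ubi->fm->peb[i];
			e->ec = ubi->fm->ec[i];
			ubi->lookuptbl[e->pnum] = e;
		}
	}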

Stay tuned, I'll release v7 today or tomorrow.

Thanks,
//richard
Artem Bityutskiy May 24, 2012, 8:17 a.m. UTC | #9
On Tue, 2012-05-22 at 18:55 +0200, Richard Weinberger wrote:
> >> +    e = find_early_wl_entry(&ubi->free, max_pnum);
> >
> > This picks the eb with the lowest pnum within 'ubi->free'.
> >
> > When called with INT_MAX (for the FM_DATA), why do you need to pick
> a
> > free eb with the minimal pnum? The FM_DATA EBs may reside everywhere
> (as
> > the FM_SB holds their location).
> > So why not pick the eb with a medium EC value (as done for standard
> > get_peb calls)? That might be better wear-leveling wise.
> 
> Fair point.
> I'll fix that.
> Artem, any comments on that?

The 'find_early_wl_entry()' function is used (currently) only at early
stages. At these stages we do not have the PEBs sorted by EC. We
have just a list. This function should not be used after the WL subsystem
is initialized.
>
Artem Bityutskiy May 24, 2012, 8:19 a.m. UTC | #10
On Tue, 2012-05-22 at 22:11 +0200, Richard Weinberger wrote:
> >> > In the error case, what are the consequences leaving the older
> on-flash
> >> > fastmap? Shouldn't it be invalidated?
> >>
> 
> After thinking a bit more about this case I think the best we can do is
> switching to read-only mode if ubi_update_fastmap() fails.
> 
Unless it fails because of -EIO, which means the PEB may become bad and
you need to pick a different one and schedule the faulty one for
torturing and marking as bad.
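
A sketch of such an error path, reusing the patch's own helpers (whether this
lives inside ubi_update_fastmap() or in its caller is left open here):

	if (ret == -EIO) {
		/* hand the suspect PEB back with torture enabled so the normal
		 * erase path decides whether to mark it bad ... */
		ubi_wl_put_fm_peb(ubi, new_fm->peb[i], 1);

		/* ... then pick a replacement and retry instead of going read-only */
		spin_lock(&ubi->wl_lock);
		new_fm->peb[i] = ubi_wl_get_fm_peb(ubi, INT_MAX);
		spin_unlock(&ubi->wl_lock);
		if (new_fm->peb[i] < 0)
			ret = new_fm->peb[i];
	}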
Artem Bityutskiy May 24, 2012, 8:22 a.m. UTC | #11
On Tue, 2012-05-22 at 18:55 +0200, Richard Weinberger wrote:
> >> +     * has never seen any PEB used by the original fastmap.
> >> +     */
> >> +    if (!e) {
> >> +            ubi_assert(ubi->old_fm);
> >> +            e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_ATOMIC);
> >
> > Must it be GFP_ATOMIC?
> 
> Yes. This function is called under a spinlock.

I did not look closely, but this sounds bad.

You need to have a much better justification than "I allocate it under a
spinlock". You need to tell "... because there is no way or very
difficult to pre-allocate it while I do not have the spinlock held,
because ... (explanation)".

>
Richard Weinberger May 24, 2012, 8:24 a.m. UTC | #12
On 24.05.2012 10:22, Artem Bityutskiy wrote:
> On Tue, 2012-05-22 at 18:55 +0200, Richard Weinberger wrote:
>>>> +     * has never seen any PEB used by the original fastmap.
>>>> +     */
>>>> +    if (!e) {
>>>> +            ubi_assert(ubi->old_fm);
>>>> +            e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_ATOMIC);
>>>
>>> Must it be GFP_ATOMIC?
>>
>> Yes. This function is called under a spinlock.
>
> I did not look close, but this sounds bad.
>
> You need to have a much better justification than "I allocate it under a
> spinlock". You need to tell "... because there is no way or very
> difficult to pre-allocate it while I do not have the spinlock held,
> because ... (explanation)".
>

True, in v7 I've reworked ubi_wl_put_fm_peb(). Now it does no longer 
need GFP_ATOMIC.

Thanks,
//richard
Richard Weinberger May 24, 2012, 8:26 a.m. UTC | #13
On 24.05.2012 10:19, Artem Bityutskiy wrote:
>> After thinking a bit more about this case I think the best we can do is
>> switching to read-only mode if ubi_update_fastmap() fails.
>>
> Unless it fails because of -EIO, which means the PEB may become bad and
> you need to pick a different one and schedule the faulty one for
> torturing and marking as bad.

Wouldn't it make more sense if ubi_update_fastmap() handled the -EIO case
internally?

Currently (as of v7) both callers ubi_volume_notify() and 
ubi_wl_get_peb() check the return value of ubi_update_fastmap()
and set UBI into read-only mode if ubi_update_fastmap() fails.

Thanks,
//richard
Artem Bityutskiy May 24, 2012, 9:21 a.m. UTC | #14
On Thu, 2012-05-24 at 10:26 +0200, Richard Weinberger wrote:
> On 24.05.2012 10:19, Artem Bityutskiy wrote:
> >> After thinking a bit more about this case I think the best we can do is
> >> switching to read-only mode if ubi_update_fastmap() fails.
> >>
> > Unless it fails because of -EIO, which means the PEB may become bad and
> > you need to pick a different one and schedule the faulty one for
> > torturing and marking as bad.
> 
> Wouldn't make more sense if ubi_update_fastmap() handles the -EIO case 
> internally?

Probably, I just meant that in the end the faulty PEB should end up
being scheduled for erasure and torturing, like all other places do it.
We should preferably have one code path which marks PEBs as bad; that
makes it much easier to make changes there.
Shmulik Ladkani May 24, 2012, 9:56 a.m. UTC | #15
Hi Artem,

On Thu, 24 May 2012 11:17:52 +0300 Artem Bityutskiy <dedekind1@gmail.com> wrote:
> On Tue, 2012-05-22 at 18:55 +0200, Richard Weinberger wrote:
> > >> +    e = find_early_wl_entry(&ubi->free, max_pnum);
> > >
> > > This picks the eb with the lowest pnum within 'ubi->free'.
> > >
> > > When called with INT_MAX (for the FM_DATA), why do you need to pick
> > a
> > > free eb with the minimal pnum? The FM_DATA EBs may reside everywhere
> > (as
> > > the FM_SB holds their location).
> > > So why not pick the eb with a medium EC value (as done for standard
> > > get_peb calls)? That might be better wear-leveling wise.
> > 
> > Fair point.
> > I'll fix that.
> > Artem, any comments on that?
> 
> The 'find_early_wl_entry()' function is used (currently) only at early
> stages. At these stages we do not have the PEBs sorted by EC. We
> have just a list. This function should not be used after the WL subsystem
> is initialized.

'find_early_wl_entry' is only called from 'ubi_wl_get_fm_peb'.

'ubi_wl_get_fm_peb' is called twice from within 'ubi_update_fastmap':
First call, to get the FM_SB, with 'max_pnum' set as UBI_FM_MAX_START.
Second, to get FM_DATA pebs, with 'max_pnum' as -1, to indicate "no
matter the location, give me pebs from the free pool".

'ubi_update_fastmap' is called from 'ubi_volume_notify' and from
'ubi_wl_get_peb' - at both points, I assume ubi->free rbtree is properly
populated. Am I mistaken?

Regards,
Shmulik
Richard Weinberger May 24, 2012, 10:03 a.m. UTC | #16
On 24.05.2012 11:56, Shmulik Ladkani wrote:
> Hi Artem,
>
> On Thu, 24 May 2012 11:17:52 +0300 Artem Bityutskiy<dedekind1@gmail.com>  wrote:
>> On Tue, 2012-05-22 at 18:55 +0200, Richard Weinberger wrote:
>>>>> +    e = find_early_wl_entry(&ubi->free, max_pnum);
>>>>
>>>> This picks the eb with the lowest pnum within 'ubi->free'.
>>>>
>>>> When called with INT_MAX (for the FM_DATA), why do you need to pick
>>> a
>>>> free eb with the minimal pnum? The FM_DATA EBs may reside everywhere
>>> (as
>>>> the FM_SB holds their location).
>>>> So why not pick the eb with a medium EC value (as done for standard
>>>> get_peb calls)? That might be better wear-leveling wise.
>>>
>>> Fair point.
>>> I'll fix that.
>>> Artem, any comments on that?
>>
>> The 'find_early_wl_entry()' function is used (currently) only at early
>> stages. At these stages we do not have the PEBs sorted by EC. We
>> have just a list. This function should not be used after the WL subsystem
>> is initialized.
>
> 'find_early_wl_entry' is only called from 'ubi_wl_get_fm_peb'.

Correct.

> 'ubi_wl_get_fm_peb' is called twice from within 'ubi_update_fastmap':
> First call, to get the FM_SB, with 'max_pnum' set as UBI_FM_MAX_START.
> Second, to get FM_DATA pebs, with 'max_pnum' as -1, to indicate "no
> matter the location, give me pebs from the free pool".

Correct. Maybe I should rename find_early_wl_entry() to 
find_wl_entry_for_fastmap() or something like that...

> 'ubi_update_fastmap' is called from 'ubi_volume_notify' and from
> 'ubi_wl_get_peb' - at both points, I assume ubi->free rbtree is properly
> populated. Am I mistaken?

Also correct.

Thanks,
//richard
Shmulik Ladkani May 24, 2012, 8:07 p.m. UTC | #17
Hi,

On Thu, 24 May 2012 12:03:28 +0200 Richard Weinberger <richard@nod.at> wrote:
> On 24.05.2012 11:56, Shmulik Ladkani wrote:
> > Hi Artem,
> >
> > On Thu, 24 May 2012 11:17:52 +0300 Artem Bityutskiy<dedekind1@gmail.com>  wrote:
> >> On Tue, 2012-05-22 at 18:55 +0200, Richard Weinberger wrote:
> >>>>> +    e = find_early_wl_entry(&ubi->free, max_pnum);
> >>>>
> >>>> This picks the eb with the lowest pnum within 'ubi->free'.
> >>>>
> >>>> When called with INT_MAX (for the FM_DATA), why do you need to pick
> >>> a
> >>>> free eb with the minimal pnum? The FM_DATA EBs may reside everywhere
> >>> (as
> >>>> the FM_SB holds their location).
> >>>> So why not pick the eb with a medium EC value (as done for standard
> >>>> get_peb calls)? That might be better wear-leveling wise.
> >>>
> >>> Fair point.
> >>> I'll fix that.
> >>> Artem, any comments on that?
> >>
> >> The 'find_early_wl_entry()' function is used (currently) only at early
> >> stages. At these stages we do not have the PEBs sorted by EC. We
> >> have just a list. This function should not be used after the WL subsystem
> >> is initialized.
> >
> > 'find_early_wl_entry' is only called from 'ubi_wl_get_fm_peb'.
> 
> Correct.
> 
> > 'ubi_wl_get_fm_peb' is called twice from within 'ubi_update_fastmap':
> > First call, to get the FM_SB, with 'max_pnum' set as UBI_FM_MAX_START.
> > Second, to get FM_DATA pebs, with 'max_pnum' as -1, to indicate "no
> > matter the location, give me pebs from the free pool".
> 
> Correct. Maybe I should rename find_early_wl_entry() to 
> find_wl_entry_for_fastmap() or something like that...

Yes, name is misleading.
How about the ugly but descriptive: find_wl_entry_by_max_pnum?

Regards,
Shmulik

Patch

diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index a0803ac..c9f0269 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -1,6 +1,6 @@ 
 obj-$(CONFIG_MTD_UBI) += ubi.o
 
 ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
-ubi-y += misc.o debug.o
+ubi-y += fastmap.o misc.o debug.o
 
 obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index f59f748..f044884 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -294,7 +294,7 @@  static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
 }
 
 /**
- * compare_lebs - find out which logical eraseblock is newer.
+ * ubi_compare_lebs - find out which logical eraseblock is newer.
  * @ubi: UBI device description object
  * @aeb: first logical eraseblock to compare
  * @pnum: physical eraseblock number of the second logical eraseblock to
@@ -313,7 +313,7 @@  static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
  *     o bit 2 is cleared: the older LEB is not corrupted;
  *     o bit 2 is set: the older LEB is corrupted.
  */
-static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
 			int pnum, const struct ubi_vid_hdr *vid_hdr)
 {
 	void *buf;
@@ -501,7 +501,7 @@  int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
 		 * sequence numbers. We still can attach these images, unless
 		 * there is a need to distinguish between old and new
 		 * eraseblocks, in which case we'll refuse the image in
-		 * 'compare_lebs()'. In other words, we attach old clean
+		 * 'ubi_compare_lebs()'. In other words, we attach old clean
 		 * images, but refuse attaching old images with duplicated
 		 * logical eraseblocks because there was an unclean reboot.
 		 */
@@ -517,7 +517,7 @@  int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
 		 * Now we have to drop the older one and preserve the newer
 		 * one.
 		 */
-		cmp_res = compare_lebs(ubi, aeb, pnum, vid_hdr);
+		cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
 		if (cmp_res < 0)
 			return cmp_res;
 
@@ -977,7 +977,12 @@  static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	}
 
 	vol_id = be32_to_cpu(vidh->vol_id);
-	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
+
+	if (vol_id > UBI_MAX_VOLUMES &&
+		vol_id != UBI_LAYOUT_VOLUME_ID &&
+		((vol_id != UBI_FM_SB_VOLUME_ID &&
+		vol_id != UBI_FM_DATA_VOLUME_ID) ||
+		ubi->attached_by_scanning == true)) {
 		int lnum = be32_to_cpu(vidh->lnum);
 
 		/* Unsupported internal volume */
@@ -1208,18 +1213,34 @@  out_ai:
 }
 
 /**
- * ubi_attach - attach an MTD device.
- * @ubi: UBI device descriptor
+ * scan_fastmap - attach MTD device using fastmap.
+ * @ubi: UBI device description object
  *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function attaches a MTD device using a fastmap and returns complete
+ * information about it in form of a "struct ubi_attach_info" object. In case
+ * of failure, an error code is returned.
  */
-int ubi_attach(struct ubi_device *ubi)
+static struct ubi_attach_info *scan_fastmap(struct ubi_device *ubi)
+{
+	int fm_start;
+
+	fm_start = ubi_find_fastmap(ubi);
+	if (fm_start < 0)
+		return ERR_PTR(-ENOENT);
+
+	return ubi_read_fastmap(ubi, fm_start);
+}
+
+static int do_attach(struct ubi_device *ubi, bool fullscan)
 {
 	int err;
 	struct ubi_attach_info *ai;
 
-	ai = scan_all(ubi);
+	if (fullscan)
+		ai = scan_all(ubi);
+	else
+		ai = scan_fastmap(ubi);
+
 	if (IS_ERR(ai))
 		return PTR_ERR(ai);
 
@@ -1256,6 +1277,31 @@  out_ai:
 }
 
 /**
+ * ubi_attach - attach an MTD device.
+ * @ubi: UBI device descriptor
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_attach(struct ubi_device *ubi)
+{
+	int err;
+
+	err = do_attach(ubi, false);
+	if (err) {
+		if (err != -ENOENT)
+			ubi_err("Attach by fastmap failed! " \
+				"Falling back to attach by scanning. " \
+				"error = %i\n", err);
+
+		ubi->attached_by_scanning = true;
+		err = do_attach(ubi, true);
+	}
+
+	return err;
+}
+
+/**
  * destroy_av - free volume attaching information.
  * @av: volume attaching information
  * @ai: attaching information
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 2c5ed5c..ef5d7b7 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -144,6 +144,16 @@  int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
 
 	ubi_do_get_device_info(ubi, &nt.di);
 	ubi_do_get_volume_info(ubi, vol, &nt.vi);
+
+	switch (ntype) {
+	case UBI_VOLUME_ADDED:
+	case UBI_VOLUME_REMOVED:
+	case UBI_VOLUME_RESIZED:
+	case UBI_VOLUME_RENAMED:
+		if (ubi_update_fastmap(ubi))
+			ubi_err("Unable to update fastmap!");
+	}
+
 	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
 }
 
@@ -874,6 +884,19 @@  int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
 	ubi->vid_hdr_offset = vid_hdr_offset;
 	ubi->autoresize_vol_id = -1;
 
+	ubi->fm_pool.used = ubi->fm_pool.size = 0;
+
+	/*
+	 * fm_pool.max_size is 5% of the total number of PEBs but it's also
+	 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
+	 */
+	ubi->fm_pool.max_size = min((mtd_div_by_eb(ubi->mtd->size, ubi->mtd) /
+		100) * 5, UBI_FM_MAX_POOL_SIZE);
+	if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
+		ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
+
+	ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
+
 	mutex_init(&ubi->buf_mutex);
 	mutex_init(&ubi->ckvol_mutex);
 	mutex_init(&ubi->device_mutex);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 5fa726f..654f121 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -57,7 +57,7 @@ 
  * global sequence counter value. It also increases the global sequence
  * counter.
  */
-static unsigned long long next_sqnum(struct ubi_device *ubi)
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
 {
 	unsigned long long sqnum;
 
@@ -522,7 +522,7 @@  retry:
 		goto out_put;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
 	if (err)
 		goto write_error;
@@ -633,7 +633,7 @@  int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
 	}
 
 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
 	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -694,7 +694,7 @@  write_error:
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
@@ -747,7 +747,7 @@  int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
 	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -812,7 +812,7 @@  write_error:
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
@@ -864,7 +864,7 @@  int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
 	if (err)
 		goto out_mutex;
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
 	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -932,7 +932,7 @@  write_error:
 		goto out_leb_unlock;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
@@ -1092,7 +1092,7 @@  int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 		vid_hdr->data_size = cpu_to_be32(data_size);
 		vid_hdr->data_crc = cpu_to_be32(crc);
 	}
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 
 	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
 	if (err) {
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
new file mode 100644
index 0000000..2d1056e
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1132 @@ 
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+#include <linux/crc32.h>
+#include "ubi.h"
+
+/**
+ * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * @ubi: UBI device description object
+ * @vol_id: the VID of the new header
+ */
+static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+{
+	struct ubi_vid_hdr *new;
+
+	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!new)
+		goto out;
+
+	new->vol_type = UBI_VID_DYNAMIC;
+	new->vol_id = cpu_to_be32(vol_id);
+
+	/* UBI implementations without fastmap support have to delete the
+	 * fastmap.
+	 */
+	new->compat = UBI_COMPAT_DELETE;
+
+out:
+	return new;
+}
+
+/**
+ * add_aeb - create and add an attach erase block to a given list.
+ * @ai: UBI attach info object
+ * @list: the target list
+ * @pnum: PEB number of the new attach erase block
+ * @ec: erase counter of the new LEB
+ */
+static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+		   int pnum, int ec)
+{
+	struct ubi_ainf_peb *aeb;
+
+	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	if (!aeb)
+		return -ENOMEM;
+
+	aeb->pnum = pnum;
+	aeb->ec = ec;
+	aeb->lnum = -1;
+	aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
+
+	ai->ec_sum += aeb->ec;
+	ai->ec_count++;
+
+	if (ai->max_ec < aeb->ec)
+		ai->max_ec = aeb->ec;
+
+	if (ai->min_ec > aeb->ec)
+		ai->min_ec = aeb->ec;
+
+	list_add_tail(&aeb->u.list, list);
+
+	return 0;
+}
+
+/**
+ * add_vol - create and add a new scan volume to ubi_attach_info.
+ * @ai: ubi_attach_info object
+ * @vol_id: VID of the new volume
+ * @used_ebs: number of used EBS
+ * @data_pad: data padding value of the new volume
+ * @vol_type: volume type
+ * @last_eb_bytes: number of bytes in the last LEB
+ */
+static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
+				       int used_ebs, int data_pad, u8 vol_type,
+				       int last_eb_bytes)
+{
+	struct ubi_ainf_volume *av;
+	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+	while (*p) {
+		parent = *p;
+		av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+		if (vol_id > av->vol_id)
+			p = &(*p)->rb_left;
+		else if (vol_id < av->vol_id)
+			p = &(*p)->rb_right;
+	}
+
+	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+	if (!av)
+		goto out;
+
+	av->highest_lnum = av->leb_count = 0;
+	av->vol_id = vol_id;
+	av->used_ebs = used_ebs;
+	av->data_pad = data_pad;
+	av->last_data_size = last_eb_bytes;
+	av->compat = 0;
+	av->vol_type = vol_type;
+	av->root = RB_ROOT;
+
+	dbg_bld("Found volume (volid %i)", vol_id);
+
+	rb_link_node(&av->rb, parent, p);
+	rb_insert_color(&av->rb, &ai->volumes);
+
+out:
+	return av;
+}
+
+/**
+ * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
+ * from its original list.
+ * @ai: ubi_attach_info object
+ * @aeb: the AEB to be assigned
+ * @av: target scan volume
+ */
+static void assign_aeb_to_av(struct ubi_attach_info *ai,
+			     struct ubi_ainf_peb *aeb,
+			     struct ubi_ainf_volume *av)
+{
+	struct ubi_ainf_peb *tmp_aeb;
+	struct rb_node **p = &av->root.rb_node, *parent = NULL;
+
+	while (*p) {
+		parent = *p;
+
+		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+		if (aeb->lnum != tmp_aeb->lnum) {
+			if (aeb->lnum < tmp_aeb->lnum)
+				p = &(*p)->rb_left;
+			else
+				p = &(*p)->rb_right;
+
+			continue;
+		} else
+			break;
+	}
+
+	list_del(&aeb->u.list);
+	av->leb_count++;
+
+	rb_link_node(&aeb->u.rb, parent, p);
+	rb_insert_color(&aeb->u.rb, &av->root);
+}
+
+/**
+ * update_vol - inserts or updates a LEB which was found in a pool.
+ * @ubi: the UBI device object
+ * @ai: attach info object
+ * @av: the volume this LEB belongs to
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ */
+static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
+		      struct ubi_ainf_peb *new_aeb)
+{
+	struct rb_node **p = &av->root.rb_node, *parent = NULL;
+	struct ubi_ainf_peb *aeb, *victim;
+	int cmp_res;
+
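+	/*
+	 * Walk the volume's RB-tree (ordered by LEB number) to see whether
+	 * this LEB is already known.
+	 */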
+	while (*p) {
+		parent = *p;
+		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+
+		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
+			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
+				p = &(*p)->rb_left;
+			else
+				p = &(*p)->rb_right;
+
+			continue;
+		}
+
+		/* This case can happen if the fastmap gets written
+		 * because of a volume change (creation, deletion, ..).
+		 * Then a PEB can be within the persistent EBA and the pool.
+		 */
+		if (aeb->pnum == new_aeb->pnum) {
+			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+			return 0;
+		}
+
+		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
+		if (cmp_res < 0)
+			return cmp_res;
+
+		/* new_aeb is newer */
+		if (cmp_res & 1) {
+			victim = kmem_cache_alloc(ai->aeb_slab_cache,
+				GFP_KERNEL);
+			if (!victim)
+				return -ENOMEM;
+
+			victim->ec = aeb->ec;
+			victim->pnum = aeb->pnum;
+			list_add_tail(&victim->u.list, &ai->erase);
+
+			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
+				av->last_data_size =
+					be32_to_cpu(new_vh->data_size);
+
+			aeb->ec = new_aeb->ec;
+			aeb->pnum = new_aeb->pnum;
+			aeb->copy_flag = new_vh->copy_flag;
+			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+		/* new_aeb is older */
+		} else {
+			dbg_bld("Vol %i: AEB %i's PEB %i is old, dropping it\n",
+				av->vol_id, aeb->lnum, new_aeb->pnum);
+			list_add_tail(&new_aeb->u.list, &ai->erase);
+		}
+
+		return 0;
+	}
+	/* This LEB is new, let's add it to the volume */
+
+	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
+		av->highest_lnum = be32_to_cpu(new_vh->lnum);
+		av->last_data_size = be32_to_cpu(new_vh->data_size);
+	}
+
+	if (av->vol_type == UBI_STATIC_VOLUME)
+		av->used_ebs = be32_to_cpu(new_vh->used_ebs);
+
+	av->leb_count++;
+
+	rb_link_node(&new_aeb->u.rb, parent, p);
+	rb_insert_color(&new_aeb->u.rb, &av->root);
+
+	return 0;
+}
+
+/**
+ * process_pool_aeb - processes a non-empty PEB found in a pool.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ */
+static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+			    struct ubi_vid_hdr *new_vh,
+			    struct ubi_ainf_peb *new_aeb)
+{
+	struct ubi_ainf_volume *av, *tmp_av = NULL;
+	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+	int found = 0;
+
+	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
+		be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
+		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+		return 0;
+	}
+
+	/* Find the volume this AEB belongs to */
+	while (*p) {
+		parent = *p;
+		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
+			p = &(*p)->rb_left;
+		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
+			p = &(*p)->rb_right;
+		else {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found) {
+		av = tmp_av;
+	} else {
+		ubi_err("Orphaned volume in fastmap pool!");
+		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+		return -EINVAL;
+	}
+
+	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
+
+	return update_vol(ubi, ai, av, new_vh, new_aeb);
+}
+
+/**
+ * scan_pool - scans a pool for changed (no longer empty) PEBs.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @pebs: an array of all PEB numbers in the pool to be scanned
+ * @pool_size: size of the pool (number of entries in @pebs)
+ * @max_sqnum: pointer to the maximal sequence number
+ */
+static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+	__be32 *pebs, int pool_size, unsigned long long *max_sqnum)
+{
+	struct ubi_vid_hdr *vh;
+	struct ubi_ec_hdr *ech;
+	struct ubi_ainf_peb *new_aeb;
+	int i;
+	int pnum;
+	int err;
+	int ret = 0;
+
+	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ech)
+		return -ENOMEM;
+
+	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vh) {
+		kfree(ech);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Now scan all PEBs in the pool to find changes which have been made
+	 * after the creation of the fastmap
+	 */
+	for (i = 0; i < pool_size; i++) {
+		pnum = be32_to_cpu(pebs[i]);
+
+		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+
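+		/*
+		 * Possible outcomes: UBI_IO_FF means the PEB is still empty,
+		 * 0 means it was used after the fastmap was written,
+		 * anything else is treated as a damaged pool and aborts the
+		 * fastmap attach.
+		 */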
+		if (err == UBI_IO_FF)
+			continue;
+		else if (err == 0) {
+			err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+			if (err) {
+				ret = err;
+
+				goto out;
+			}
+
+			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+				GFP_KERNEL);
+			if (!new_aeb) {
+				ret = -ENOMEM;
+
+				goto out;
+			}
+
+			new_aeb->ec = be64_to_cpu(ech->ec);
+			new_aeb->pnum = pnum;
+			new_aeb->lnum = be32_to_cpu(vh->lnum);
+			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
+			new_aeb->copy_flag = vh->copy_flag;
+			new_aeb->scrub = 0;
+
+			err = process_pool_aeb(ubi, ai, vh, new_aeb);
+			if (err) {
+				ret = err;
+
+				goto out;
+			}
+
+			if (*max_sqnum < new_aeb->sqnum)
+				*max_sqnum = new_aeb->sqnum;
+		} else {
+			/* We are paranoid and fall back to scanning mode */
+			ubi_err("Fastmap pool contains damaged PEBs!");
+			ret = err;
+
+			goto out;
+		}
+	}
+
+out:
+	ubi_free_vid_hdr(ubi, vh);
+	kfree(ech);
+
+	return ret;
+}
+
+/**
+ * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
+ * @ubi: UBI device object
+ * @fm_raw: the fastmap itself as a byte array
+ * @fm_size: size of the fastmap in bytes
+ */
+struct ubi_attach_info *ubi_attach_fastmap(struct ubi_device *ubi, char *fm_raw,
+				       size_t fm_size)
+{
+	struct list_head used;
+	struct ubi_ainf_volume *av;
+	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
+	struct ubi_attach_info *ai;
+
+	struct ubi_fm_sb *fmsb;
+	struct ubi_fm_hdr *fmhdr;
+	struct ubi_fm_scan_pool *fmpl;
+	struct ubi_fm_ec *fmec;
+	struct ubi_fm_volhdr *fmvhdr;
+	struct ubi_fm_eba *fm_eba;
+
+	int i, j;
+	size_t fm_pos = 0;
+	unsigned long long max_sqnum = 0;
+
+	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+	if (!ai)
+		return ERR_PTR(-ENOMEM);
+
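+	/*
+	 * The local "used" list temporarily holds all PEBs from the
+	 * fastmap's used records; they are moved into the per-volume
+	 * RB-trees while the EBA tables are parsed below.
+	 */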
+	INIT_LIST_HEAD(&used);
+	INIT_LIST_HEAD(&ai->corr);
+	INIT_LIST_HEAD(&ai->free);
+	INIT_LIST_HEAD(&ai->erase);
+	INIT_LIST_HEAD(&ai->alien);
+	ai->volumes = RB_ROOT;
+	ai->min_ec = UBI_MAX_ERASECOUNTER;
+
+	ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
+					      sizeof(struct ubi_ainf_peb),
+					      0, 0, NULL);
+	if (!ai->aeb_slab_cache)
+		goto fail;
+
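+	/*
+	 * Walk the fastmap in the order in which ubi_write_fastmap() emits
+	 * it: super block, header, pool, free and used EC records, then one
+	 * volume header plus EBA table per volume.
+	 */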
+	fmsb = (struct ubi_fm_sb *)(fm_raw);
+	ai->max_sqnum = be64_to_cpu(fmsb->sqnum);
+	fm_pos += sizeof(struct ubi_fm_sb);
+	if (fm_pos >= fm_size)
+		goto fail;
+
+	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmhdr);
+	if (fm_pos >= fm_size)
+		goto fail;
+
+	if (fmhdr->magic != cpu_to_be32(UBI_FM_HDR_MAGIC))
+		goto fail;
+
+	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl);
+	if (fm_pos >= fm_size)
+		goto fail;
+	if (fmpl->magic != cpu_to_be32(UBI_FM_POOL_MAGIC))
+		goto fail;
+	if (be32_to_cpu(fmpl->size) > UBI_FM_MAX_POOL_SIZE)
+		goto fail;
+
+	/* read EC values from free list */
+	for (i = 0; i < be32_to_cpu(fmhdr->nfree); i++) {
+		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fmec);
+		if (fm_pos >= fm_size)
+			goto fail;
+
+		if (add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+			    be32_to_cpu(fmec->ec)))
+			goto fail;
+	}
+
+	/* read EC values from used list */
+	for (i = 0; i < be32_to_cpu(fmhdr->nused); i++) {
+		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fmec);
+		if (fm_pos >= fm_size)
+			goto fail;
+
+		if (add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+			    be32_to_cpu(fmec->ec)))
+			goto fail;
+	}
+
+	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+
+	/* Iterate over all volumes and read their EBA table */
+	for (i = 0; i < be32_to_cpu(fmhdr->nvol); i++) {
+		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fmvhdr);
+		if (fm_pos >= fm_size)
+			goto fail;
+
+		if (fmvhdr->magic != cpu_to_be32(UBI_FM_VHDR_MAGIC))
+			goto fail;
+
+		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
+			be32_to_cpu(fmvhdr->used_ebs),
+			be32_to_cpu(fmvhdr->data_pad),
+			fmvhdr->vol_type, be32_to_cpu(fmvhdr->last_eb_bytes));
+
+		if (!av)
+			goto fail;
+
+		ai->vols_found++;
+		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
+			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
+
+		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fm_eba);
+		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->nused));
+		if (fm_pos >= fm_size)
+			goto fail;
+
+		if (fm_eba->magic != cpu_to_be32(UBI_FM_EBA_MAGIC))
+			goto fail;
+
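+		/*
+		 * The EBA table maps LEB number (the index j) to a PEB
+		 * number; negative entries denote unmapped LEBs.
+		 */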
+		for (j = 0; j < be32_to_cpu(fm_eba->nused); j++) {
+
+			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
+				continue;
+
+			aeb = NULL;
+			list_for_each_entry(tmp_aeb, &used, u.list) {
+				if (tmp_aeb->pnum ==
+					be32_to_cpu(fm_eba->pnum[j]))
+					aeb = tmp_aeb;
+			}
+
+			/* Corner case, this PEB must be in the pool */
+			if (!aeb)
+				continue;
+
+			aeb->lnum = j;
+
+			if (av->highest_lnum <= aeb->lnum)
+				av->highest_lnum = aeb->lnum;
+
+			assign_aeb_to_av(ai, aeb, av);
+
+			dbg_bld("Inserting PEB:%i (LEB %i) to vol %i",
+				aeb->pnum, aeb->lnum, av->vol_id);
+		}
+	}
+
+	/*
+	 * The remaining PEBs in the used list are not assigned to any
+	 * volume. They lived in the fastmap pool but were never used.
+	 */
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+		list_del(&tmp_aeb->u.list);
+		list_add_tail(&tmp_aeb->u.list, &ai->free);
+	}
+
+	if (scan_pool(ubi, ai, fmpl->pebs, be32_to_cpu(fmpl->size),
+			&max_sqnum) < 0)
+		goto fail;
+
+	if (max_sqnum > ai->max_sqnum)
+		ai->max_sqnum = max_sqnum;
+
+	return ai;
+
+fail:
+	ubi_destroy_ai(ai);
+	return NULL;
+}
+
+/**
+ * ubi_read_fastmap - read the fastmap
+ * @ubi: UBI device object
+ * @fm_sb_pnum: PEB number of the fastmap super block
+ */
+struct ubi_attach_info *ubi_read_fastmap(struct ubi_device *ubi,
+					  int fm_sb_pnum)
+{
+	struct ubi_fm_sb *fmsb;
+	struct ubi_vid_hdr *vh;
+	int ret, i, nblocks;
+	char *fm_raw;
+	size_t fm_size;
+	u32 data_crc;
+	unsigned long long sqnum = 0;
+	struct ubi_attach_info *ai = NULL;
+
+	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
+	if (!fmsb) {
+		ai = ERR_PTR(-ENOMEM);
+
+		goto out;
+	}
+
+	ret = ubi_io_read(ubi, fmsb, fm_sb_pnum, ubi->leb_start, sizeof(*fmsb));
+	if (ret) {
+		ubi_err("Unable to read fastmap super block");
+		ai = ERR_PTR(ret);
+		kfree(fmsb);
+
+		goto out;
+	}
+
+	if (fmsb->magic != cpu_to_be32(UBI_FM_SB_MAGIC)) {
+		ubi_err("Super block magic does not match");
+		ai = ERR_PTR(-EINVAL);
+		kfree(fmsb);
+
+		goto out;
+	}
+
+	if (fmsb->version != UBI_FM_FMT_VERSION) {
+		ubi_err("Unknown fastmap format version!");
+		ai = ERR_PTR(-EINVAL);
+		kfree(fmsb);
+
+		goto out;
+	}
+
+	nblocks = be32_to_cpu(fmsb->nblocks);
+
+	if (nblocks > UBI_FM_MAX_BLOCKS || nblocks < 1) {
+		ubi_err("Number of fastmap blocks is invalid");
+		ai = ERR_PTR(-EINVAL);
+		kfree(fmsb);
+
+		goto out;
+	}
+
+	fm_size = ubi->leb_size * nblocks;
+	/* fm_raw will contain the whole fastmap */
+	fm_raw = vzalloc(fm_size);
+	if (!fm_raw) {
+		ai = ERR_PTR(-ENOMEM);
+		kfree(fmsb);
+
+		goto out;
+	}
+
+	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vh) {
+		ai = ERR_PTR(-ENOMEM);
+		kfree(fmsb);
+
+		goto free_raw;
+	}
+
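+	/*
+	 * Read the fastmap block by block. Block 0 must carry the fastmap
+	 * super block volume ID, all further blocks the fastmap data
+	 * volume ID.
+	 */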
+	for (i = 0; i < nblocks; i++) {
+		ret = ubi_io_read_vid_hdr(ubi, be32_to_cpu(fmsb->block_loc[i]),
+			vh, 0);
+		if (ret) {
+			ubi_err("Unable to read fastmap block# %i (PEB: %i)",
+				i, be32_to_cpu(fmsb->block_loc[i]));
+			ai = ERR_PTR(ret);
+			kfree(fmsb);
+
+			goto free_vhdr;
+		}
+
+		if (i == 0) {
+			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
+				ai = ERR_PTR(-EINVAL);
+				kfree(fmsb);
+
+				goto free_vhdr;
+			}
+		} else {
+			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
+				ai = ERR_PTR(-EINVAL);
+				kfree(fmsb);
+
+				goto free_vhdr;
+			}
+		}
+
+		if (sqnum < be64_to_cpu(vh->sqnum))
+			sqnum = be64_to_cpu(vh->sqnum);
+
+		ret = ubi_io_read(ubi, fm_raw + (ubi->leb_size * i),
+			be32_to_cpu(fmsb->block_loc[i]),
+			ubi->leb_start, ubi->leb_size);
+
+		if (ret) {
+			ubi_err("Unable to read fastmap block# %i (PEB: %i)",
+				i, be32_to_cpu(fmsb->block_loc[i]));
+			ai = ERR_PTR(ret);
+			kfree(fmsb);
+
+			goto free_vhdr;
+		}
+	}
+
+	kfree(fmsb);
+
+	fmsb = (struct ubi_fm_sb *)fm_raw;
+	data_crc = crc32_be(UBI_CRC32_INIT, fm_raw + sizeof(*fmsb),
+		fm_size - sizeof(*fmsb));
+	if (data_crc != be32_to_cpu(fmsb->data_crc)) {
+		ubi_err("Fastmap data CRC is invalid");
+		ai = ERR_PTR(-EINVAL);
+
+		goto free_vhdr;
+	}
+
+	fmsb->sqnum = cpu_to_be64(sqnum);
+
+	ai = ubi_attach_fastmap(ubi, fm_raw, fm_size);
+	if (IS_ERR_OR_NULL(ai)) {
+		if (!ai)
+			ai = ERR_PTR(-EINVAL);
+
+		goto free_vhdr;
+	}
+
+	/* Store the fastmap position into the ubi_device struct */
+	ubi->fm = kmalloc(sizeof(struct ubi_fastmap), GFP_KERNEL);
+	if (!ubi->fm) {
+		ubi_destroy_ai(ai);
+		ai = ERR_PTR(-ENOMEM);
+
+		goto free_vhdr;
+	}
+
+	ubi->fm->size = fm_size;
+	ubi->fm->used_blocks = nblocks;
+
+	for (i = 0; i < UBI_FM_MAX_BLOCKS; i++) {
+		if (i < nblocks) {
+			ubi->fm->peb[i] = be32_to_cpu(fmsb->block_loc[i]);
+			ubi->fm->ec[i] = be32_to_cpu(fmsb->block_ec[i]);
+		} else {
+			ubi->fm->peb[i] = -1;
+			ubi->fm->ec[i] = 0;
+		}
+	}
+
+free_vhdr:
+	ubi_free_vid_hdr(ubi, vh);
+free_raw:
+	vfree(fm_raw);
+out:
+	return ai;
+}
+
+/**
+ * ubi_find_fastmap - searches the first UBI_FM_MAX_START PEBs for the
+ * fastmap super block.
+ * @ubi: UBI device object
+ */
+int ubi_find_fastmap(struct ubi_device *ubi)
+{
+	int i, ret;
+	int fm_sb = -ENOENT;
+	struct ubi_vid_hdr *vhdr;
+
+	vhdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vhdr)
+		return -ENOMEM;
+
+	for (i = 0; i < UBI_FM_MAX_START; i++) {
+		ret = ubi_io_read_vid_hdr(ubi, i, vhdr, 0);
+		/* ignore read errors */
+		if (ret)
+			continue;
+
+		if (be32_to_cpu(vhdr->vol_id) == UBI_FM_SB_VOLUME_ID) {
+			fm_sb = i;
+			break;
+		}
+	}
+
+	ubi_free_vid_hdr(ubi, vhdr);
+	return fm_sb;
+}
+
+/**
+ * ubi_write_fastmap - writes a fastmap
+ * @ubi: UBI device object
+ * @new_fm: the fastmap to be written
+ */
+static int ubi_write_fastmap(struct ubi_device *ubi,
+				struct ubi_fastmap *new_fm)
+{
+	int ret;
+	size_t fm_pos = 0;
+	char *fm_raw;
+	int i, j;
+
+	struct ubi_fm_sb *fmsb;
+	struct ubi_fm_hdr *fmh;
+	struct ubi_fm_scan_pool *fmpl;
+	struct ubi_fm_ec *fec;
+	struct ubi_fm_volhdr *fvh;
+	struct ubi_fm_eba *feba;
+
+	struct rb_node *node;
+	struct ubi_wl_entry *wl_e;
+	struct ubi_volume *vol;
+
+	struct ubi_vid_hdr *avhdr, *dvhdr;
+
+	int nfree, nused, nvol;
+
+	fm_raw = vzalloc(new_fm->size);
+	if (!fm_raw) {
+		ret = -ENOMEM;
+
+		goto out;
+	}
+
+	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+	if (!avhdr) {
+		ret = -ENOMEM;
+
+		goto out_vfree;
+	}
+
+	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
+	if (!dvhdr) {
+		ret = -ENOMEM;
+
+		goto out_kfree;
+	}
+
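+	/*
+	 * Hold the volume and WL locks so that the state serialized into
+	 * fm_raw is consistent; they are dropped again before the actual
+	 * flash writes below.
+	 */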
+	spin_lock(&ubi->volumes_lock);
+	spin_lock(&ubi->wl_lock);
+
+	fmsb = (struct ubi_fm_sb *)fm_raw;
+	fm_pos += sizeof(*fmsb);
+	ubi_assert(fm_pos <= new_fm->size);
+
+	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmh);
+	ubi_assert(fm_pos <= new_fm->size);
+
+	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
+	fmsb->version = UBI_FM_FMT_VERSION;
+	fmsb->nblocks = cpu_to_be32(new_fm->used_blocks);
+	/* the max sqnum will be filled in while *reading* the fastmap */
+	fmsb->sqnum = 0;
+
+	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
+	nfree = 0;
+	nused = 0;
+	nvol = 0;
+
+	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+	fm_pos += sizeof(*fmpl);
+	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+	fmpl->size = cpu_to_be32(ubi->fm_pool.size);
+
+	for (i = 0; i < ubi->fm_pool.size; i++)
+		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+
+	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
+		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(wl_e->pnum);
+		fec->ec = cpu_to_be32(wl_e->ec);
+
+		nfree++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= new_fm->size);
+	}
+	fmh->nfree = cpu_to_be32(nfree);
+
+	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
+		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(wl_e->pnum);
+		fec->ec = cpu_to_be32(wl_e->ec);
+
+		nused++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= new_fm->size);
+	}
+	fmh->nused = cpu_to_be32(nused);
+
+	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
+		vol = ubi->volumes[i];
+
+		if (!vol)
+			continue;
+
+		nvol++;
+
+		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*fvh);
+		ubi_assert(fm_pos <= new_fm->size);
+
+		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
+		fvh->vol_id = cpu_to_be32(vol->vol_id);
+		fvh->vol_type = vol->vol_type;
+		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
+		fvh->data_pad = cpu_to_be32(vol->data_pad);
+		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
+
+		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
+			vol->vol_type == UBI_STATIC_VOLUME);
+
+		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->used_ebs);
+		ubi_assert(fm_pos <= new_fm->size);
+
+		for (j = 0; j < vol->used_ebs; j++)
+			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
+
+		feba->nused = cpu_to_be32(j);
+		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
+	}
+	fmh->nvol = cpu_to_be32(nvol);
+
+	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+	avhdr->lnum = 0;
+
+	spin_unlock(&ubi->wl_lock);
+	spin_unlock(&ubi->volumes_lock);
+
+	dbg_bld("Writing fastmap SB to PEB %i\n", new_fm->peb[0]);
+	ret = ubi_io_write_vid_hdr(ubi, new_fm->peb[0], avhdr);
+	if (ret) {
+		ubi_err("Unable to write vid_hdr to fastmap SB!\n");
+
+		goto out_kfree;
+	}
+
+	for (i = 0; i < UBI_FM_MAX_BLOCKS; i++) {
+		fmsb->block_loc[i] = cpu_to_be32(new_fm->peb[i]);
+		fmsb->block_ec[i] = cpu_to_be32(new_fm->ec[i]);
+	}
+
+	fmsb->data_crc = 0;
+	fmsb->data_crc = cpu_to_be32(crc32_be(UBI_CRC32_INIT,
+		fm_raw + sizeof(*fmsb), new_fm->size - sizeof(*fmsb)));
+
+	for (i = 1; i < new_fm->used_blocks; i++) {
+		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+		dvhdr->lnum = cpu_to_be32(i);
+		dbg_bld("Writing fastmap data to PEB %i sqnum %llu\n",
+			new_fm->peb[i], be64_to_cpu(dvhdr->sqnum));
+		ret = ubi_io_write_vid_hdr(ubi, new_fm->peb[i], dvhdr);
+		if (ret) {
+			ubi_err("Unable to write vid_hdr to PEB %i!\n",
+				new_fm->peb[i]);
+
+			goto out_kfree;
+		}
+	}
+
+	for (i = 0; i < new_fm->used_blocks; i++) {
+		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
+			new_fm->peb[i], ubi->leb_start, ubi->leb_size);
+		if (ret) {
+			ubi_err("Unable to write fastmap to PEB %i!\n",
+				new_fm->peb[i]);
+
+			goto out_kfree;
+		}
+	}
+
+	ubi_assert(new_fm);
+	ubi->fm = new_fm;
+
+	dbg_bld("Fastmap written!");
+
+out_kfree:
+	ubi_free_vid_hdr(ubi, avhdr);
+	ubi_free_vid_hdr(ubi, dvhdr);
+out_vfree:
+	vfree(fm_raw);
+out:
+	return ret;
+}
+
+/**
+ * get_ec - returns the erase counter of a given PEB
+ * @ubi: UBI device object
+ * @pnum: PEB number
+ */
+static int get_ec(struct ubi_device *ubi, int pnum)
+{
+	struct ubi_wl_entry *e;
+
+	e = ubi->lookuptbl[pnum];
+
+	/* can this really happen? */
+	if (!e)
+		return ubi->mean_ec ?: 1;
+	else
+		return e->ec;
+}
+
+/**
+ * ubi_update_fastmap - writes a new fastmap. Called by UBI when a volume
+ * changes or the fastmap pool becomes full.
+ * @ubi: UBI device object
+ */
+int ubi_update_fastmap(struct ubi_device *ubi)
+{
+	int ret, i;
+	struct ubi_fastmap *new_fm;
+
+	if (ubi->ro_mode)
+		return 0;
+
+	new_fm = kmalloc(sizeof(*new_fm), GFP_KERNEL);
+	if (!new_fm)
+		return -ENOMEM;
+
+	ubi->old_fm = ubi->fm;
+	ubi->fm = NULL;
+
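+	/*
+	 * If an old fastmap exists, try to replace its super block PEB with
+	 * a fresh early PEB; if none is available, erase and reuse the old
+	 * one by hand. All other PEBs of the old fastmap are handed back to
+	 * the WL sub-system.
+	 */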
+	if (ubi->old_fm) {
+		spin_lock(&ubi->wl_lock);
+		new_fm->peb[0] = ubi_wl_get_fm_peb(ubi, UBI_FM_MAX_START);
+		spin_unlock(&ubi->wl_lock);
+		/* no fresh early PEB was found, reuse the old one */
+		if (new_fm->peb[0] < 0) {
+			struct ubi_ec_hdr *ec_hdr;
+			long long ec;
+
+			ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+			if (!ec_hdr) {
+				kfree(new_fm);
+				return -ENOMEM;
+			}
+
+			/* we have to erase the block by hand */
+
+			ret = ubi_io_read_ec_hdr(ubi, ubi->old_fm->peb[0],
+				ec_hdr, 0);
+			if (ret) {
+				ubi_err("Unable to read EC header");
+
+				kfree(new_fm);
+				kfree(ec_hdr);
+				return -EINVAL;
+			}
+
+			ret = ubi_io_sync_erase(ubi, ubi->old_fm->peb[0], 0);
+			if (ret < 0) {
+				ubi_err("Unable to erase old SB");
+
+				kfree(new_fm);
+				kfree(ec_hdr);
+				return -EINVAL;
+			}
+
+			ec = be64_to_cpu(ec_hdr->ec) + ret;
+			if (ec > UBI_MAX_ERASECOUNTER) {
+				ubi_err("Erase counter overflow!");
+				kfree(new_fm);
+				kfree(ec_hdr);
+				return -EINVAL;
+			}
+			ec_hdr->ec = cpu_to_be64(ec);
+
+			ret = ubi_io_write_ec_hdr(ubi, ubi->old_fm->peb[0],
+				ec_hdr);
+			kfree(ec_hdr);
+			if (ret) {
+				ubi_err("Unable to write new EC header");
+				kfree(new_fm);
+				return -EINVAL;
+			}
+
+			new_fm->peb[0] = ubi->old_fm->peb[0];
+			new_fm->ec[0] = ec;
+		} else {
+			/* we've got a new early PEB, return the old one */
+			ubi_wl_put_fm_peb(ubi, ubi->old_fm->peb[0], 0);
+			new_fm->ec[0] = get_ec(ubi, new_fm->peb[0]);
+		}
+
+		/* return all other fastmap blocks to the wl system */
+		for (i = 1; i < UBI_FM_MAX_BLOCKS; i++) {
+			if (ubi->old_fm->peb[i] >= 0)
+				ubi_wl_put_fm_peb(ubi, ubi->old_fm->peb[i], 0);
+			else
+				break;
+		}
+	} else {
+		spin_lock(&ubi->wl_lock);
+		new_fm->peb[0] = ubi_wl_get_fm_peb(ubi, UBI_FM_MAX_START);
+		spin_unlock(&ubi->wl_lock);
+		if (new_fm->peb[0] < 0) {
+			ubi_err("Could not find an early PEB");
+			kfree(new_fm);
+			return -ENOSPC;
+		}
+		new_fm->ec[0] = get_ec(ubi, new_fm->peb[0]);
+	}
+
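+	/*
+	 * Estimate an upper bound for the fastmap size: super block, header
+	 * and pool, one EC record per PEB, the EBA entries and one volume
+	 * header per possible volume, rounded up to whole LEBs.
+	 */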
+	new_fm->size = sizeof(struct ubi_fm_sb) +
+			sizeof(struct ubi_fm_hdr) +
+			sizeof(struct ubi_fm_scan_pool) +
+			(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
+			(sizeof(struct ubi_fm_eba) +
+			(ubi->peb_count * sizeof(__be32))) +
+			sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+	new_fm->size = roundup(new_fm->size, ubi->leb_size);
+
+	new_fm->used_blocks = new_fm->size / ubi->leb_size;
+
+	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
+		ubi_err("Fastmap too large");
+		kfree(new_fm);
+
+		return -ENOSPC;
+	}
+
+	/* give the wl subsystem a chance to produce some free blocks */
+	cond_resched();
+
+	for (i = 1; i < UBI_FM_MAX_BLOCKS; i++) {
+		if (i < new_fm->used_blocks) {
+			spin_lock(&ubi->wl_lock);
+			new_fm->peb[i] = ubi_wl_get_fm_peb(ubi, INT_MAX);
+			spin_unlock(&ubi->wl_lock);
+			if (new_fm->peb[i] < 0) {
+				ubi_err("Could not get any free erase block");
+
+				while (i--)
+					ubi_wl_put_fm_peb(ubi, new_fm->peb[i],
+						0);
+
+				kfree(new_fm);
+
+				return -ENOSPC;
+			}
+
+			new_fm->ec[i] = get_ec(ubi, new_fm->peb[i]);
+		} else {
+			new_fm->peb[i] = -1;
+			new_fm->ec[i] = 0;
+		}
+	}
+
+	kfree(ubi->old_fm);
+	ubi->old_fm = NULL;
+
+	return ubi_write_fastmap(ubi, new_fm);
+}
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 468ffbc..631a1d0 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -375,4 +375,122 @@  struct ubi_vtbl_record {
 	__be32  crc;
 } __packed;
 
+/* UBI fastmap on-flash data structures */
+
+#define UBI_FM_SB_VOLUME_ID	(UBI_LAYOUT_VOLUME_ID + 1)
+#define UBI_FM_DATA_VOLUME_ID	(UBI_LAYOUT_VOLUME_ID + 2)
+
+/* fastmap on-flash data structure format version */
+#define UBI_FM_FMT_VERSION	1
+
+#define UBI_FM_SB_MAGIC		0x7B11D69F
+#define UBI_FM_HDR_MAGIC	0xD4B82EF7
+#define UBI_FM_VHDR_MAGIC	0xFA370ED1
+#define UBI_FM_POOL_MAGIC	0x67AF4D08
+#define UBI_FM_EBA_MAGIC	0xf0c040a8
+
+#define UBI_FM_MAX_START	64
+#define UBI_FM_MAX_BLOCKS	32
+#define UBI_FM_MIN_POOL_SIZE	8
+#define UBI_FM_MAX_POOL_SIZE	256
+
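+/*
+ * The on-flash fastmap data set is laid out as follows (see also
+ * fastmap.c):
+ *
+ *	struct ubi_fm_sb
+ *	struct ubi_fm_hdr
+ *	struct ubi_fm_scan_pool
+ *	nfree + nused x struct ubi_fm_ec
+ *	per volume: struct ubi_fm_volhdr followed by struct ubi_fm_eba
+ */
+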
+/**
+ * struct ubi_fm_sb - UBI fastmap super block
+ * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
+ * @version: format version of this fastmap
+ * @data_crc: CRC over the fastmap data
+ * @nblocks: number of PEBs used by this fastmap
+ * @block_loc: an array containing the location of all PEBs of the fastmap
+ * @block_ec: the erase counter of each used PEB
+ * @sqnum: highest sequence number value at the time the fastmap was written
+ *
+ */
+struct ubi_fm_sb {
+	__be32 magic;
+	__u8 version;
+	__u8 padding1[3];
+	__be32 data_crc;
+	__be32 nblocks;
+	__be32 block_loc[UBI_FM_MAX_BLOCKS];
+	__be32 block_ec[UBI_FM_MAX_BLOCKS];
+	__be64 sqnum;
+	__u8 padding2[32];
+} __packed;
+
+/**
+ * struct ubi_fm_hdr - header of the fastmap data set
+ * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
+ * @nfree: number of free PEBs known by this fastmap
+ * @nused: number of used PEBs known by this fastmap
+ * @nvol: number of UBI volumes known by this fastmap
+ */
+struct ubi_fm_hdr {
+	__be32 magic;
+	__be32 nfree;
+	__be32 nused;
+	__be32 nvol;
+	__u8 padding[16];
+} __packed;
+
+/* struct ubi_fm_hdr is followed by struct ubi_fm_scan_pool */
+
+/**
+ * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
+ * @magic: pool magic number (%UBI_FM_POOL_MAGIC)
+ * @size: current pool size
+ * @pebs: an array containing the location of all PEBs in this pool
+ */
+struct ubi_fm_scan_pool {
+	__be32 magic;
+	__be32 size;
+	__be32 pebs[UBI_FM_MAX_POOL_SIZE];
+	__be32 padding[4];
+} __packed;
+
+/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
+
+/**
+ * struct ubi_fm_ec - stores the erase counter of a PEB
+ * @pnum: PEB number
+ * @ec: ec of this PEB
+ */
+struct ubi_fm_ec {
+	__be32 pnum;
+	__be32 ec;
+} __packed;
+
+/**
+ * struct ubi_fm_volhdr - Fastmap volume header.
+ * It identifies the start of an EBA table.
+ * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
+ * @vol_id: volume id of the fastmapped volume
+ * @vol_type: type of the fastmapped volume
+ * @data_pad: data_pad value of the fastmapped volume
+ * @used_ebs: number of used LEBs within this volume
+ * @last_eb_bytes: number of bytes used in the last LEB
+ */
+struct ubi_fm_volhdr {
+	__be32 magic;
+	__be32 vol_id;
+	__u8 vol_type;
+	__u8 padding1[3];
+	__be32 data_pad;
+	__be32 used_ebs;
+	__be32 last_eb_bytes;
+	__u8 padding2[8];
+} __packed;
+
+/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
+
+/**
+ * struct ubi_fm_eba - denotes an association between a PEB and LEB
+ * @magic: EBA table magic number (%UBI_FM_EBA_MAGIC)
+ * @nused: number of table entries
+ * @pnum: PEB number of LEB (LEB is the index)
+ */
+struct ubi_fm_eba {
+	__be32 magic;
+	__be32 nused;
+	__be32 pnum[0];
+} __packed;
 #endif /* !__UBI_MEDIA_H__ */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5275632..299a601 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -202,6 +202,39 @@  struct ubi_rename_entry {
 struct ubi_volume_desc;
 
 /**
+ * struct ubi_fastmap - in-memory fastmap data structure.
+ * @peb: PEBs used by the current fastmap
+ * @ec: the erase counter of each used PEB
+ * @size: size of the fastmap in bytes
+ * @used_blocks: number of used PEBs
+ */
+struct ubi_fastmap {
+	int peb[UBI_FM_MAX_BLOCKS];
+	unsigned int ec[UBI_FM_MAX_BLOCKS];
+	size_t size;
+	int used_blocks;
+};
+
+/**
+ * struct ubi_fm_pool - in-memory fastmap pool
+ * @pebs: PEBs in this pool
+ * @used: number of used PEBs
+ * @size: total number of PEBs in this pool
+ * @max_size: maximal size of the pool
+ *
+ * A pool gets filled with up to @max_size PEBs.
+ * If all PEBs within the pool are used, a new fastmap is written
+ * to the flash and the pool is refilled with empty PEBs.
+ *
+ */
+struct ubi_fm_pool {
+	int pebs[UBI_FM_MAX_POOL_SIZE];
+	int used;
+	int size;
+	int max_size;
+};
+
+/**
  * struct ubi_volume - UBI volume description data structure.
  * @dev: device object to make use of the the Linux device model
  * @cdev: character device object to create character device
@@ -336,6 +369,13 @@  struct ubi_wl_entry;
  * @ltree: the lock tree
  * @alc_mutex: serializes "atomic LEB change" operations
  *
+ * @fm: in-memory data structure of the currently used fastmap
+ * @old_fm: in-memory data structure of the old fastmap
+ *	    (only valid while writing a new one)
+ * @fm_pool: in-memory data structure of the fastmap pool
+ * @attached_by_scanning: this UBI device was attached by the old scanning
+ *			  method. All fastmap volumes have to be deleted.
+ *
  * @used: RB-tree of used physical eraseblocks
  * @erroneous: RB-tree of erroneous used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
@@ -427,6 +467,12 @@  struct ubi_device {
 	struct rb_root ltree;
 	struct mutex alc_mutex;
 
+	/* Fastmap stuff */
+	struct ubi_fastmap *fm;
+	struct ubi_fastmap *old_fm;
+	struct ubi_fm_pool fm_pool;
+	bool attached_by_scanning;
+
 	/* Wear-leveling sub-system's stuff */
 	struct rb_root used;
 	struct rb_root erroneous;
@@ -604,7 +650,7 @@  extern struct class *ubi_class;
 extern struct mutex ubi_devices_mutex;
 extern struct blocking_notifier_head ubi_notifiers;
 
-/* scan.c */
+/* attach.c */
 int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
 		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
 struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
@@ -661,6 +707,7 @@  int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 		     struct ubi_vid_hdr *vid_hdr);
 int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
 
 /* wl.c */
 int ubi_wl_get_peb(struct ubi_device *ubi);
@@ -670,6 +717,9 @@  int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
 void ubi_wl_close(struct ubi_device *ubi);
 int ubi_thread(void *u);
+int ubi_wl_get_fm_peb(struct ubi_device *ubi, int max_pnum);
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, int pnum, int torture);
+int ubi_is_fm_block(struct ubi_device *ubi, int pnum);
 
 /* io.c */
 int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -706,6 +756,15 @@  void ubi_free_internal_volumes(struct ubi_device *ubi);
 void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
 void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
 			    struct ubi_volume_info *vi);
+/* attach.c */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+		      int pnum, const struct ubi_vid_hdr *vid_hdr);
+
+/* fastmap.c */
+int ubi_update_fastmap(struct ubi_device *ubi);
+struct ubi_attach_info *ubi_read_fastmap(struct ubi_device *ubi,
+					 int fm_sb_pnum);
+int ubi_find_fastmap(struct ubi_device *ubi);
 
 /*
  * ubi_rb_for_each_entry - walk an RB-tree.
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index c143e61..c7ab311 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -162,6 +162,25 @@  static int self_check_in_pq(const struct ubi_device *ubi,
 			    struct ubi_wl_entry *e);
 
 /**
+ * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
+ * @ubi: UBI device description object
+ * @pnum: the PEB to check
+ */
+int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+	int i;
+
+	if (!ubi->fm)
+		return 0;
+
+	for (i = 0; i < ubi->fm->used_blocks; i++)
+		if (ubi->fm->peb[i] == pnum)
+			return 1;
+
+	return 0;
+}
+
+/**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
  * @root: the root of the tree
@@ -367,13 +386,70 @@  static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
 }
 
 /**
- * ubi_wl_get_peb - get a physical eraseblock.
+ * find_early_wl_entry - find a wear-leveling entry with a low pnum.
+ * @root: the RB-tree to search
+ * @max_pnum: highest acceptable pnum
+ *
+ * This function looks for the wear-leveling entry with the lowest pnum
+ * below @max_pnum, i.e. an eraseblock near the beginning of the flash.
+ */
+static struct ubi_wl_entry *find_early_wl_entry(struct rb_root *root,
+						int max_pnum)
+{
+	struct rb_node *p;
+	struct ubi_wl_entry *e, *victim = NULL;
+
+	ubi_rb_for_each_entry(p, e, root, u.rb) {
+		if (e->pnum < max_pnum) {
+			victim = e;
+			max_pnum = e->pnum;
+		}
+	}
+
+	return victim;
+}
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @max_pnum: the highest acceptable erase block number
+ *
+ * The function returns the pnum of a free physical eraseblock below
+ * @max_pnum and removes it from the WL sub-system, or a negative error
+ * code if none is found. Must be called with wl_lock held!
+ */
+int ubi_wl_get_fm_peb(struct ubi_device *ubi, int max_pnum)
+{
+	int ret = -ENOSPC;
+	struct ubi_wl_entry *e;
+
+	if (!ubi->free.rb_node) {
+		ubi_err("no free eraseblocks");
+
+		goto out;
+	}
+
+	e = find_early_wl_entry(&ubi->free, max_pnum);
+	if (!e)
+		goto out;
+
+	ret = e->pnum;
+
+	/* remove it from the free list,
+	 * the wl subsystem no longer knows this erase block */
+	rb_erase(&e->u.rb, &ubi->free);
+out:
+	return ret;
+}
+
+/**
+ * __ubi_wl_get_peb - get a physical eraseblock.
  * @ubi: UBI device description object
  *
  * This function returns a physical eraseblock in case of success and a
  * negative error code in case of failure. Might sleep.
  */
-int ubi_wl_get_peb(struct ubi_device *ubi)
+static int __ubi_wl_get_peb(struct ubi_device *ubi)
 {
 	int err;
 	struct ubi_wl_entry *e, *first, *last;
@@ -424,6 +500,34 @@  retry:
 	return e->pnum;
 }
 
+/**
+ * ubi_wl_get_peb - works exactly like __ubi_wl_get_peb but keeps track of
+ * the fastmap pool.
+ * @ubi: UBI device description object
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+	struct ubi_fm_pool *pool = &ubi->fm_pool;
+
+	/* the pool is empty or exhausted, refill it
+	 * and write a new fastmap */
+	if (pool->used == pool->size || !pool->size) {
+		for (pool->size = 0; pool->size < pool->max_size;
+				pool->size++) {
+			pool->pebs[pool->size] = __ubi_wl_get_peb(ubi);
+			if (pool->pebs[pool->size] < 0)
+				break;
+		}
+
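+		/*
+		 * pool->size now reflects how many PEBs we actually got;
+		 * a partially refilled pool is fine.
+		 */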
+		pool->used = 0;
+		ubi_update_fastmap(ubi);
+	}
+
+	/* we did not get a single free PEB */
+	if (!pool->size)
+		return -ENOSPC;
+
+	return pool->pebs[pool->used++];
+}
+
 /**
  * prot_queue_del - remove a physical eraseblock from the protection queue.
  * @ubi: UBI device description object
@@ -589,6 +693,9 @@  static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 {
 	struct ubi_work *wl_wrk;
 
+	ubi_assert(e);
+	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
 	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
 	       e->pnum, e->ec, torture);
 
@@ -605,6 +712,56 @@  static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 }
 
 /**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * @ubi: UBI device description object
+ * @pnum: the PEB to return
+ * @torture: if this physical eraseblock has to be tortured
+ *
+ * see: ubi_wl_put_peb()
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, int pnum, int torture)
+{
+	int i, err = 0;
+	struct ubi_wl_entry *e;
+
+	dbg_wl("PEB %d", pnum);
+	ubi_assert(pnum >= 0);
+	ubi_assert(pnum < ubi->peb_count);
+
+	spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+
+	/* This can happen if we attached via a fastmap for the very
+	 * first time and are now writing a new one. In this case the wl
+	 * subsystem has never seen any PEB used by the original fastmap.
+	 */
+	if (!e) {
+		ubi_assert(ubi->old_fm);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_ATOMIC);
+		if (!e) {
+			spin_unlock(&ubi->wl_lock);
+			return -ENOMEM;
+		}
+
+		e->pnum = pnum;
+		e->ec = 0;
+		/* use the ec value from the fastmap */
+		for (i = 0; i < UBI_FM_MAX_BLOCKS; i++) {
+			if (pnum == ubi->old_fm->peb[i]) {
+				e->ec = ubi->old_fm->ec[i];
+				break;
+			}
+		}
+		ubi_assert(e->ec);
+		ubi->lookuptbl[pnum] = e;
+	}
+
+	spin_unlock(&ubi->wl_lock);
+
+	err = schedule_erase(ubi, e, torture);
+
+	return err;
+}
+
+/**
  * wear_leveling_worker - wear-leveling worker function.
  * @ubi: UBI device description object
  * @wrk: the work object
@@ -981,6 +1138,8 @@  static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 
 	dbg_wl("erase PEB %d EC %d", pnum, e->ec);
 
+	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
 	err = sync_erase(ubi, e, wl_wrk->torture);
 	if (!err) {
 		/* Fine, we've erased it successfully */
@@ -1415,6 +1574,7 @@  int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 
 		e->pnum = aeb->pnum;
 		e->ec = aeb->ec;
+		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
 			kmem_cache_free(ubi_wl_entry_slab, e);
@@ -1432,7 +1592,10 @@  int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 		e->pnum = aeb->pnum;
 		e->ec = aeb->ec;
 		ubi_assert(e->ec >= 0);
+		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
 		wl_tree_add(e, &ubi->free);
+
 		ubi->lookuptbl[e->pnum] = e;
 	}
 
@@ -1447,6 +1610,9 @@  int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 			e->pnum = aeb->pnum;
 			e->ec = aeb->ec;
 			ubi->lookuptbl[e->pnum] = e;
+			if (ubi_is_fm_block(ubi, aeb->pnum))
+				continue;
+
 			if (!aeb->scrub) {
 				dbg_wl("add PEB %d EC %d to the used tree",
 				       e->pnum, e->ec);