
[v2,03/11] mm/gup: migrate PIN_LONGTERM dev coherent pages to system

Message ID 20211206185251.20646-4-alex.sierra@amd.com
State Not Applicable
Series Add MEMORY_DEVICE_COHERENT for coherent device memory mapping

Commit Message

Sierra Guiza, Alejandro (Alex) Dec. 6, 2021, 6:52 p.m. UTC
Avoid long-term pinning of coherent device-type pages, as it could
interfere with the device's own memory manager. If a caller tries to get
user device coherent pages with the PIN_LONGTERM flag set, those pages
will be migrated back to system memory.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
---
 mm/gup.c | 32 ++++++++++++++++++++++++++++++--
 1 file changed, 30 insertions(+), 2 deletions(-)

Comments

Alistair Popple Dec. 8, 2021, 11:31 a.m. UTC | #1
On Tuesday, 7 December 2021 5:52:43 AM AEDT Alex Sierra wrote:
> Avoid long term pinning for Coherent device type pages. This could
> interfere with their own device memory manager.
> If caller tries to get user device coherent pages with PIN_LONGTERM flag
> set, those pages will be migrated back to system memory.
> 
> Signed-off-by: Alex Sierra <alex.sierra@amd.com>
> ---
>  mm/gup.c | 32 ++++++++++++++++++++++++++++++--
>  1 file changed, 30 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/gup.c b/mm/gup.c
> index 886d6148d3d0..1572eacf07f4 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -1689,17 +1689,37 @@ struct page *get_dump_page(unsigned long addr)
>  #endif /* CONFIG_ELF_CORE */
>  
>  #ifdef CONFIG_MIGRATION
> +static int migrate_device_page(unsigned long address,
> +				struct page *page)
> +{
> +	struct vm_area_struct *vma = find_vma(current->mm, address);
> +	struct vm_fault vmf = {
> +		.vma = vma,
> +		.address = address & PAGE_MASK,
> +		.flags = FAULT_FLAG_USER,
> +		.pgoff = linear_page_index(vma, address),
> +		.gfp_mask = GFP_KERNEL,
> +		.page = page,
> +	};
> +	if (page->pgmap && page->pgmap->ops->migrate_to_ram)
> +		return page->pgmap->ops->migrate_to_ram(&vmf);

How does this synchronise against pgmap being released? As I understand things
at this point we're not holding a reference on either the page or pgmap, so
the page and therefore the pgmap may have been freed.

I think a similar problem exists for device private fault handling as well and
it has been on my list of things to fix for a while. I think the solution is to
call try_get_page(), except it doesn't work with device pages due to the whole
refcount thing. That issue is blocking a fair bit of work now so I've started
looking into it.
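
For illustration, the kind of guard being described might look like the
following sketch, assuming try_get_page() could be made to work for device
pages (today it cannot, precisely because of the refcounting scheme
mentioned above):

	static int migrate_device_page(unsigned long address,
					struct page *page)
	{
		struct vm_area_struct *vma = find_vma(current->mm, address);
		struct vm_fault vmf = {
			.vma = vma,
			.address = address & PAGE_MASK,
			.flags = FAULT_FLAG_USER,
			.pgoff = linear_page_index(vma, address),
			.gfp_mask = GFP_KERNEL,
			.page = page,
		};
		int ret = -EBUSY;

		/* Hold a reference so page->pgmap cannot be freed under us. */
		if (!try_get_page(page))
			return -EBUSY;

		if (page->pgmap && page->pgmap->ops->migrate_to_ram)
			ret = page->pgmap->ops->migrate_to_ram(&vmf);

		put_page(page);
		return ret;
	}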

> +
> +	return -EBUSY;
> +}
> +
>  /*
>   * Check whether all pages are pinnable, if so return number of pages.  If some
>   * pages are not pinnable, migrate them, and unpin all pages. Return zero if
>   * pages were migrated, or if some pages were not successfully isolated.
>   * Return negative error if migration fails.
>   */
> -static long check_and_migrate_movable_pages(unsigned long nr_pages,
> +static long check_and_migrate_movable_pages(unsigned long start,
> +					    unsigned long nr_pages,
>  					    struct page **pages,
>  					    unsigned int gup_flags)
>  {
>  	unsigned long i;
> +	unsigned long page_index;
>  	unsigned long isolation_error_count = 0;
>  	bool drain_allow = true;
>  	LIST_HEAD(movable_page_list);
> @@ -1720,6 +1740,10 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
>  		 * If we get a movable page, since we are going to be pinning
>  		 * these entries, try to move them out if possible.
>  		 */
> +		if (is_device_page(head)) {
> +			page_index = i;
> +			goto unpin_pages;
> +		}
>  		if (!is_pinnable_page(head)) {
>  			if (PageHuge(head)) {
>  				if (!isolate_huge_page(head, &movable_page_list))
> @@ -1750,12 +1774,16 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
>  	if (list_empty(&movable_page_list) && !isolation_error_count)
>  		return nr_pages;
>  
> +unpin_pages:
>  	if (gup_flags & FOLL_PIN) {
>  		unpin_user_pages(pages, nr_pages);
>  	} else {
>  		for (i = 0; i < nr_pages; i++)
>  			put_page(pages[i]);
>  	}
> +	if (is_device_page(head))
> +		return migrate_device_page(start + page_index * PAGE_SIZE, head);

This isn't very optimal - if a range contains more than one device page (which
seems likely) we will have to go around the whole gup/check_and_migrate loop
once for each device page which seems unnecessary. You should be able to either
build a list or migrate them as you go through the loop. I'm also currently
looking into how to extend migrate_pages() to support device pages which might
be useful here too.
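
As a rough illustration of the batching idea (hypothetical code, not part
of the patch; note it records addresses and pages *before* the pins are
dropped, since pages[] must not be touched after unpinning, and the pgmap
lifetime question above still applies):

	/* Collect device pages while scanning, before any unpinning. */
	unsigned long *dev_addrs;
	struct page **dev_pages;
	unsigned long ndev = 0, j;
	long ret = 0;

	dev_addrs = kmalloc_array(nr_pages, sizeof(*dev_addrs), GFP_KERNEL);
	dev_pages = kmalloc_array(nr_pages, sizeof(*dev_pages), GFP_KERNEL);
	if (!dev_addrs || !dev_pages) {
		kfree(dev_addrs);
		kfree(dev_pages);
		return -ENOMEM;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *head = compound_head(pages[i]);

		if (is_device_page(head)) {
			dev_addrs[ndev] = start + i * PAGE_SIZE;
			dev_pages[ndev] = head;
			ndev++;
		}
	}

	/* ... unpin all pages exactly as the patch does ... */

	/* Migrate every device page found, not just the first one.
	 * (Error handling elided; only the last status is kept.) */
	for (j = 0; j < ndev; j++)
		ret = migrate_device_page(dev_addrs[j], dev_pages[j]);

	kfree(dev_addrs);
	kfree(dev_pages);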

> +
>  	if (!list_empty(&movable_page_list)) {
>  		ret = migrate_pages(&movable_page_list, alloc_migration_target,
>  				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
> @@ -1798,7 +1826,7 @@ static long __gup_longterm_locked(struct mm_struct *mm,
>  					     NULL, gup_flags);
>  		if (rc <= 0)
>  			break;
> -		rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
> +		rc = check_and_migrate_movable_pages(start, rc, pages, gup_flags);
>  	} while (!rc);
>  	memalloc_pin_restore(flags);
>  
>
Jason Gunthorpe Dec. 8, 2021, 1:53 p.m. UTC | #2
On Wed, Dec 08, 2021 at 10:31:58PM +1100, Alistair Popple wrote:
> On Tuesday, 7 December 2021 5:52:43 AM AEDT Alex Sierra wrote:
> > Avoid long term pinning for Coherent device type pages. This could
> > interfere with their own device memory manager.
> > If caller tries to get user device coherent pages with PIN_LONGTERM flag
> > set, those pages will be migrated back to system memory.
> > 
> > Signed-off-by: Alex Sierra <alex.sierra@amd.com>
> >  mm/gup.c | 32 ++++++++++++++++++++++++++++++--
> >  1 file changed, 30 insertions(+), 2 deletions(-)
> > 
> > diff --git a/mm/gup.c b/mm/gup.c
> > index 886d6148d3d0..1572eacf07f4 100644
> > +++ b/mm/gup.c
> > @@ -1689,17 +1689,37 @@ struct page *get_dump_page(unsigned long addr)
> >  #endif /* CONFIG_ELF_CORE */
> >  
> >  #ifdef CONFIG_MIGRATION
> > +static int migrate_device_page(unsigned long address,
> > +				struct page *page)
> > +{
> > +	struct vm_area_struct *vma = find_vma(current->mm, address);
> > +	struct vm_fault vmf = {
> > +		.vma = vma,
> > +		.address = address & PAGE_MASK,
> > +		.flags = FAULT_FLAG_USER,
> > +		.pgoff = linear_page_index(vma, address),
> > +		.gfp_mask = GFP_KERNEL,
> > +		.page = page,
> > +	};
> > +	if (page->pgmap && page->pgmap->ops->migrate_to_ram)
> > +		return page->pgmap->ops->migrate_to_ram(&vmf);
> 
> How does this synchronise against pgmap being released? As I understand things
> at this point we're not holding a reference on either the page or pgmap, so
> the page and therefore the pgmap may have been freed.

For sure, this can't keep touching the pages[] array after it unpinned
them:

> >  	if (gup_flags & FOLL_PIN) {
> >  		unpin_user_pages(pages, nr_pages);
               ^^^^^^^^^^^^^^^^^^^

> >  	} else {
> >  		for (i = 0; i < nr_pages; i++)
> >  			put_page(pages[i]);
> >  	}
> > +	if (is_device_page(head))
> > +		return migrate_device_page(start + page_index * PAGE_SIZE, head);

It was safe before this patch as isolate_lru_page(head) has a
get_page() inside.

Also, please try hard not to turn this function into goto spaghetti.

> I think a similar problem exists for device private fault handling as well and
> it has been on my list of things to fix for a while. I think the solution is to
> call try_get_page(), except it doesn't work with device pages due to the whole
> refcount thing. That issue is blocking a fair bit of work now so I've started
> looking into it.

Where is this?

Jason
Felix Kuehling Dec. 8, 2021, 4:58 p.m. UTC | #3
Am 2021-12-08 um 6:31 a.m. schrieb Alistair Popple:
> On Tuesday, 7 December 2021 5:52:43 AM AEDT Alex Sierra wrote:
>> Avoid long term pinning for Coherent device type pages. This could
>> interfere with their own device memory manager.
>> If caller tries to get user device coherent pages with PIN_LONGTERM flag
>> set, those pages will be migrated back to system memory.
>>
>> Signed-off-by: Alex Sierra <alex.sierra@amd.com>
>> ---
>>  mm/gup.c | 32 ++++++++++++++++++++++++++++++--
>>  1 file changed, 30 insertions(+), 2 deletions(-)
>>
>> diff --git a/mm/gup.c b/mm/gup.c
>> index 886d6148d3d0..1572eacf07f4 100644
>> --- a/mm/gup.c
>> +++ b/mm/gup.c
>> @@ -1689,17 +1689,37 @@ struct page *get_dump_page(unsigned long addr)
>>  #endif /* CONFIG_ELF_CORE */
>>  
>>  #ifdef CONFIG_MIGRATION
>> +static int migrate_device_page(unsigned long address,
>> +				struct page *page)
>> +{
>> +	struct vm_area_struct *vma = find_vma(current->mm, address);
>> +	struct vm_fault vmf = {
>> +		.vma = vma,
>> +		.address = address & PAGE_MASK,
>> +		.flags = FAULT_FLAG_USER,
>> +		.pgoff = linear_page_index(vma, address),
>> +		.gfp_mask = GFP_KERNEL,
>> +		.page = page,
>> +	};
>> +	if (page->pgmap && page->pgmap->ops->migrate_to_ram)
>> +		return page->pgmap->ops->migrate_to_ram(&vmf);
> How does this synchronise against pgmap being released? As I understand things
> at this point we're not holding a reference on either the page or pgmap, so
> the page and therefore the pgmap may have been freed.
>
> I think a similar problem exists for device private fault handling as well and
> it has been on my list of things to fix for a while. I think the solution is to
> call try_get_page(), except it doesn't work with device pages due to the whole
> refcount thing. That issue is blocking a fair bit of work now so I've started
> looking into it.

At least the page should have been pinned by the __get_user_pages_locked
call in __gup_longterm_locked. That refcount is dropped in
check_and_migrate_movable_pages when it returns 0 or an error.


>
>> +
>> +	return -EBUSY;
>> +}
>> +
>>  /*
>>   * Check whether all pages are pinnable, if so return number of pages.  If some
>>   * pages are not pinnable, migrate them, and unpin all pages. Return zero if
>>   * pages were migrated, or if some pages were not successfully isolated.
>>   * Return negative error if migration fails.
>>   */
>> -static long check_and_migrate_movable_pages(unsigned long nr_pages,
>> +static long check_and_migrate_movable_pages(unsigned long start,
>> +					    unsigned long nr_pages,
>>  					    struct page **pages,
>>  					    unsigned int gup_flags)
>>  {
>>  	unsigned long i;
>> +	unsigned long page_index;
>>  	unsigned long isolation_error_count = 0;
>>  	bool drain_allow = true;
>>  	LIST_HEAD(movable_page_list);
>> @@ -1720,6 +1740,10 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
>>  		 * If we get a movable page, since we are going to be pinning
>>  		 * these entries, try to move them out if possible.
>>  		 */
>> +		if (is_device_page(head)) {
>> +			page_index = i;
>> +			goto unpin_pages;
>> +		}
>>  		if (!is_pinnable_page(head)) {
>>  			if (PageHuge(head)) {
>>  				if (!isolate_huge_page(head, &movable_page_list))
>> @@ -1750,12 +1774,16 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
>>  	if (list_empty(&movable_page_list) && !isolation_error_count)
>>  		return nr_pages;
>>  
>> +unpin_pages:
>>  	if (gup_flags & FOLL_PIN) {
>>  		unpin_user_pages(pages, nr_pages);
>>  	} else {
>>  		for (i = 0; i < nr_pages; i++)
>>  			put_page(pages[i]);
>>  	}
>> +	if (is_device_page(head))
>> +		return migrate_device_page(start + page_index * PAGE_SIZE, head);
> This isn't very optimal - if a range contains more than one device page (which
> seems likely) we will have to go around the whole gup/check_and_migrate loop
> once for each device page which seems unnecessary. You should be able to either
> build a list or migrate them as you go through the loop. I'm also currently
> looking into how to extend migrate_pages() to support device pages which might
> be useful here too.

We have to do it this way because page->pgmap->ops->migrate_to_ram can
migrate multiple pages per "CPU page fault" to amortize the cost of
migration. The AMD driver typically migrates 2MB at a time. Calling
page->pgmap->ops->migrate_to_ram for each page would probably be even
less optimal.
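
For context, a driver-side migrate_to_ram() handler along those lines
might look roughly like this (a sketch only; example_migrate_range_to_ram()
stands in for the driver's real range-migration helper and is not an
existing function):

	static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		/* Migrate a whole 2MB-aligned window around the faulting
		 * address, not just one page, to amortize migration cost. */
		unsigned long start = max(ALIGN_DOWN(vmf->address, SZ_2M),
					  vma->vm_start);
		unsigned long end = min(start + SZ_2M, vma->vm_end);

		if (example_migrate_range_to_ram(vma, start, end))
			return VM_FAULT_SIGBUS;
		return 0;
	}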

Regards,
  Felix


>
>> +
>>  	if (!list_empty(&movable_page_list)) {
>>  		ret = migrate_pages(&movable_page_list, alloc_migration_target,
>>  				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
>> @@ -1798,7 +1826,7 @@ static long __gup_longterm_locked(struct mm_struct *mm,
>>  					     NULL, gup_flags);
>>  		if (rc <= 0)
>>  			break;
>> -		rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
>> +		rc = check_and_migrate_movable_pages(start, rc, pages, gup_flags);
>>  	} while (!rc);
>>  	memalloc_pin_restore(flags);
>>  
>>
>
>
Felix Kuehling Dec. 8, 2021, 5:30 p.m. UTC | #4
Am 2021-12-08 um 11:58 a.m. schrieb Felix Kuehling:
> Am 2021-12-08 um 6:31 a.m. schrieb Alistair Popple:
>> On Tuesday, 7 December 2021 5:52:43 AM AEDT Alex Sierra wrote:
>>> Avoid long term pinning for Coherent device type pages. This could
>>> interfere with their own device memory manager.
>>> If caller tries to get user device coherent pages with PIN_LONGTERM flag
>>> set, those pages will be migrated back to system memory.
>>>
>>> Signed-off-by: Alex Sierra <alex.sierra@amd.com>
>>> ---
>>>  mm/gup.c | 32 ++++++++++++++++++++++++++++++--
>>>  1 file changed, 30 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/mm/gup.c b/mm/gup.c
>>> index 886d6148d3d0..1572eacf07f4 100644
>>> --- a/mm/gup.c
>>> +++ b/mm/gup.c
>>> @@ -1689,17 +1689,37 @@ struct page *get_dump_page(unsigned long addr)
>>>  #endif /* CONFIG_ELF_CORE */
>>>  
>>>  #ifdef CONFIG_MIGRATION
>>> +static int migrate_device_page(unsigned long address,
>>> +				struct page *page)
>>> +{
>>> +	struct vm_area_struct *vma = find_vma(current->mm, address);
>>> +	struct vm_fault vmf = {
>>> +		.vma = vma,
>>> +		.address = address & PAGE_MASK,
>>> +		.flags = FAULT_FLAG_USER,
>>> +		.pgoff = linear_page_index(vma, address),
>>> +		.gfp_mask = GFP_KERNEL,
>>> +		.page = page,
>>> +	};
>>> +	if (page->pgmap && page->pgmap->ops->migrate_to_ram)
>>> +		return page->pgmap->ops->migrate_to_ram(&vmf);
>> How does this synchronise against pgmap being released? As I understand things
>> at this point we're not holding a reference on either the page or pgmap, so
>> the page and therefore the pgmap may have been freed.
>>
>> I think a similar problem exists for device private fault handling as well and
>> it has been on my list of things to fix for a while. I think the solution is to
>> call try_get_page(), except it doesn't work with device pages due to the whole
>> refcount thing. That issue is blocking a fair bit of work now so I've started
>> looking into it.
> At least the page should have been pinned by the __get_user_pages_locked
> call in __gup_longterm_locked. That refcount is dropped in
> check_and_migrate_movable_pages when it returns 0 or an error.

Never mind. We unpin the pages first. Alex, would the migration work if
we unpinned them afterwards? Also, the normal CPU page fault code path
seems to make sure the page is locked (check in pfn_swap_entry_to_page)
before calling migrate_to_ram.

Regards,
  Felix
Sierra Guiza, Alejandro (Alex) Dec. 8, 2021, 6:55 p.m. UTC | #5
On 12/8/2021 11:30 AM, Felix Kuehling wrote:
> Am 2021-12-08 um 11:58 a.m. schrieb Felix Kuehling:
>> Am 2021-12-08 um 6:31 a.m. schrieb Alistair Popple:
>>> On Tuesday, 7 December 2021 5:52:43 AM AEDT Alex Sierra wrote:
>>>> Avoid long term pinning for Coherent device type pages. This could
>>>> interfere with their own device memory manager.
>>>> If caller tries to get user device coherent pages with PIN_LONGTERM flag
>>>> set, those pages will be migrated back to system memory.
>>>>
>>>> Signed-off-by: Alex Sierra <alex.sierra@amd.com>
>>>> ---
>>>>   mm/gup.c | 32 ++++++++++++++++++++++++++++++--
>>>>   1 file changed, 30 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/mm/gup.c b/mm/gup.c
>>>> index 886d6148d3d0..1572eacf07f4 100644
>>>> --- a/mm/gup.c
>>>> +++ b/mm/gup.c
>>>> @@ -1689,17 +1689,37 @@ struct page *get_dump_page(unsigned long addr)
>>>>   #endif /* CONFIG_ELF_CORE */
>>>>   
>>>>   #ifdef CONFIG_MIGRATION
>>>> +static int migrate_device_page(unsigned long address,
>>>> +				struct page *page)
>>>> +{
>>>> +	struct vm_area_struct *vma = find_vma(current->mm, address);
>>>> +	struct vm_fault vmf = {
>>>> +		.vma = vma,
>>>> +		.address = address & PAGE_MASK,
>>>> +		.flags = FAULT_FLAG_USER,
>>>> +		.pgoff = linear_page_index(vma, address),
>>>> +		.gfp_mask = GFP_KERNEL,
>>>> +		.page = page,
>>>> +	};
>>>> +	if (page->pgmap && page->pgmap->ops->migrate_to_ram)
>>>> +		return page->pgmap->ops->migrate_to_ram(&vmf);
>>> How does this synchronise against pgmap being released? As I understand things
>>> at this point we're not holding a reference on either the page or pgmap, so
>>> the page and therefore the pgmap may have been freed.
>>>
>>> I think a similar problem exists for device private fault handling as well and
>>> it has been on my list of things to fix for a while. I think the solution is to
>>> call try_get_page(), except it doesn't work with device pages due to the whole
>>> refcount thing. That issue is blocking a fair bit of work now so I've started
>>> looking into it.
>> At least the page should have been pinned by the __get_user_pages_locked
>> call in __gup_longterm_locked. That refcount is dropped in
>> check_and_migrate_movable_pages when it returns 0 or an error.
> Never mind. We unpin the pages first. Alex, would the migration work if
> we unpinned them afterwards? Also, the normal CPU page fault code path
> seems to make sure the page is locked (check in pfn_swap_entry_to_page)
> before calling migrate_to_ram.

No, you cannot unpin after migration, because of the expected_count vs.
page_count check in migrate_page_move_mapping(), called from migrate_page().
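
For reference, the check in question looks roughly like this in
mm/migrate.c (paraphrased from the kernel of this era): any extra pin
makes page_count() exceed expected_count, so migration bails out:

	int migrate_page_move_mapping(struct address_space *mapping,
			struct page *newpage, struct page *page, int extra_count)
	{
		int expected_count = expected_page_refs(mapping, page) + extra_count;

		if (!mapping) {
			/* Anonymous page without mapping */
			if (page_count(page) != expected_count)
				return -EAGAIN;
			/* ... proceed with the migration ... */
		}
		/* ... */
	}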

Regards,
Alex Sierra

> Regards,
>    Felix
>
>
Alistair Popple Dec. 9, 2021, 1:45 a.m. UTC | #6
On Thursday, 9 December 2021 12:53:45 AM AEDT Jason Gunthorpe wrote:
> > I think a similar problem exists for device private fault handling as well and
> > it has been on my list of things to fix for a while. I think the solution is to
> > call try_get_page(), except it doesn't work with device pages due to the whole
> > refcount thing. That issue is blocking a fair bit of work now so I've started
> > looking into it.
> 
> Where is this?
 
Nothing posted yet. I've been going through the mailing list and the old
thread[1] to get an understanding of what is left to do. If you have any
suggestions they would be welcome.

[1] https://lore.kernel.org/all/20211014153928.16805-3-alex.sierra@amd.com/
Jason Gunthorpe Dec. 9, 2021, 2:53 a.m. UTC | #7
On Thu, Dec 09, 2021 at 12:45:24PM +1100, Alistair Popple wrote:
> On Thursday, 9 December 2021 12:53:45 AM AEDT Jason Gunthorpe wrote:
> > > I think a similar problem exists for device private fault handling as well and
> > > it has been on my list of things to fix for a while. I think the solution is to
> > > call try_get_page(), except it doesn't work with device pages due to the whole
> > > refcount thing. That issue is blocking a fair bit of work now so I've started
> > > looking into it.
> > 
> > Where is this?
>  
> Nothing posted yet. I've been going through the mailing list and the old
> thread[1] to get an understanding of what is left to do. If you have any
> suggestions they would be welcome.

Oh, that

Joao's series here is the first step:

https://lore.kernel.org/linux-mm/20211202204422.26777-1-joao.m.martins@oracle.com/

I already sent a patch to remove the DRM usage of PUD/PMD -
0d979509539e ("drm/ttm: remove ttm_bo_vm_insert_huge()")

Next, someone needs to change FSDAX to have a folio covering the
ZONE_DEVICE pages before it installs a PUD or PMD. I don't know enough
about filesystems to know how to do this.

Thus all PUD/PMD entries will point at a head page or larger of a
compound. This is important because all the existing machinery for THP
assumes 1 PUD/PMD means 1 struct page to manipulate.

Then, consolidate all the duplicated code that runs when a page is
removed from a PTE/PMD/PUD etc into a function. Figure out why the
duplications are different to make them the same (I have some rough
patches for this step)

Start with PUD and have zap on PUD call the consolidated function and
make vmf_insert_pfn_pud_prot() accept a struct page not pfn and incr
the refcount. PUD is easy because there is no THP
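
A sketch of what that step might look like (hypothetical helper; the
existing vmf_insert_pfn_pud_prot() takes a pfn_t, not a struct page):

	vm_fault_t vmf_insert_page_pud_prot(struct vm_fault *vmf,
					    struct page *page,
					    pgprot_t pgprot, bool write)
	{
		/* The page table now holds a real reference on the page. */
		get_page(page);
		return vmf_insert_pfn_pud_prot(vmf,
				pfn_to_pfn_t(page_to_pfn(page)),
				pgprot, write);
	}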

Then do the same to PMD without breaking the THP code

Then make the PTE also incr the refcount on insert and zap

Exterminate vma_is_special_huge() along the way, there is no such
thing as a special huge VMA without a pud/pmd_special flag so all
things installed here must be struct page and not special.

Then the patches that are already posted are applicable and we can
kill the refcount == 1 stuff. No 0 ref count pages installed in page
tables.

Once all of that is done it is fairly straightforward to remove
pud/pmd/pte_devmap entirely and the pgmap stuff from gup.c

Jason
Alistair Popple Dec. 9, 2021, 10:53 a.m. UTC | #8
On Thursday, 9 December 2021 5:55:26 AM AEDT Sierra Guiza, Alejandro (Alex) wrote:
> 
> On 12/8/2021 11:30 AM, Felix Kuehling wrote:
> > Am 2021-12-08 um 11:58 a.m. schrieb Felix Kuehling:
> >> Am 2021-12-08 um 6:31 a.m. schrieb Alistair Popple:
> >>> On Tuesday, 7 December 2021 5:52:43 AM AEDT Alex Sierra wrote:
> >>>> Avoid long term pinning for Coherent device type pages. This could
> >>>> interfere with their own device memory manager.
> >>>> If caller tries to get user device coherent pages with PIN_LONGTERM flag
> >>>> set, those pages will be migrated back to system memory.
> >>>>
> >>>> Signed-off-by: Alex Sierra <alex.sierra@amd.com>
> >>>> ---
> >>>>   mm/gup.c | 32 ++++++++++++++++++++++++++++++--
> >>>>   1 file changed, 30 insertions(+), 2 deletions(-)
> >>>>
> >>>> diff --git a/mm/gup.c b/mm/gup.c
> >>>> index 886d6148d3d0..1572eacf07f4 100644
> >>>> --- a/mm/gup.c
> >>>> +++ b/mm/gup.c
> >>>> @@ -1689,17 +1689,37 @@ struct page *get_dump_page(unsigned long addr)
> >>>>   #endif /* CONFIG_ELF_CORE */
> >>>>   
> >>>>   #ifdef CONFIG_MIGRATION
> >>>> +static int migrate_device_page(unsigned long address,
> >>>> +				struct page *page)
> >>>> +{
> >>>> +	struct vm_area_struct *vma = find_vma(current->mm, address);
> >>>> +	struct vm_fault vmf = {
> >>>> +		.vma = vma,
> >>>> +		.address = address & PAGE_MASK,
> >>>> +		.flags = FAULT_FLAG_USER,
> >>>> +		.pgoff = linear_page_index(vma, address),
> >>>> +		.gfp_mask = GFP_KERNEL,
> >>>> +		.page = page,
> >>>> +	};
> >>>> +	if (page->pgmap && page->pgmap->ops->migrate_to_ram)
> >>>> +		return page->pgmap->ops->migrate_to_ram(&vmf);
> >>> How does this synchronise against pgmap being released? As I understand things
> >>> at this point we're not holding a reference on either the page or pgmap, so
> >>> the page and therefore the pgmap may have been freed.
> >>>
> >>> I think a similar problem exists for device private fault handling as well and
> >>> it has been on my list of things to fix for a while. I think the solution is to
> >>> call try_get_page(), except it doesn't work with device pages due to the whole
> >>> refcount thing. That issue is blocking a fair bit of work now so I've started
> >>> looking into it.
> >> At least the page should have been pinned by the __get_user_pages_locked
> >> call in __gup_longterm_locked. That refcount is dropped in
> >> check_and_migrate_movable_pages when it returns 0 or an error.
> > Never mind. We unpin the pages first. Alex, would the migration work if
> > we unpinned them afterwards? Also, the normal CPU page fault code path
> > seems to make sure the page is locked (check in pfn_swap_entry_to_page)
> > before calling migrate_to_ram.

I don't think that's true. The check in pfn_swap_entry_to_page() is only for
migration entries:

	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

As this is coherent memory, though, why do we have to call into a device
driver to do the migration? Couldn't this all be done in the kernel?

> No, you can not unpinned after migration. Due to the expected_count VS 
> page_count condition at migrate_page_move_mapping, during migrate_page call.
> 
> Regards,
> Alex Sierra
> 
> > Regards,
> >    Felix
> >
> >
>
Felix Kuehling Dec. 9, 2021, 4:29 p.m. UTC | #9
Am 2021-12-09 um 5:53 a.m. schrieb Alistair Popple:
> On Thursday, 9 December 2021 5:55:26 AM AEDT Sierra Guiza, Alejandro (Alex) wrote:
>> On 12/8/2021 11:30 AM, Felix Kuehling wrote:
>>> Am 2021-12-08 um 11:58 a.m. schrieb Felix Kuehling:
>>>> Am 2021-12-08 um 6:31 a.m. schrieb Alistair Popple:
>>>>> On Tuesday, 7 December 2021 5:52:43 AM AEDT Alex Sierra wrote:
>>>>>> Avoid long term pinning for Coherent device type pages. This could
>>>>>> interfere with their own device memory manager.
>>>>>> If caller tries to get user device coherent pages with PIN_LONGTERM flag
>>>>>> set, those pages will be migrated back to system memory.
>>>>>>
>>>>>> Signed-off-by: Alex Sierra <alex.sierra@amd.com>
>>>>>> ---
>>>>>>   mm/gup.c | 32 ++++++++++++++++++++++++++++++--
>>>>>>   1 file changed, 30 insertions(+), 2 deletions(-)
>>>>>>
>>>>>> diff --git a/mm/gup.c b/mm/gup.c
>>>>>> index 886d6148d3d0..1572eacf07f4 100644
>>>>>> --- a/mm/gup.c
>>>>>> +++ b/mm/gup.c
>>>>>> @@ -1689,17 +1689,37 @@ struct page *get_dump_page(unsigned long addr)
>>>>>>   #endif /* CONFIG_ELF_CORE */
>>>>>>   
>>>>>>   #ifdef CONFIG_MIGRATION
>>>>>> +static int migrate_device_page(unsigned long address,
>>>>>> +				struct page *page)
>>>>>> +{
>>>>>> +	struct vm_area_struct *vma = find_vma(current->mm, address);
>>>>>> +	struct vm_fault vmf = {
>>>>>> +		.vma = vma,
>>>>>> +		.address = address & PAGE_MASK,
>>>>>> +		.flags = FAULT_FLAG_USER,
>>>>>> +		.pgoff = linear_page_index(vma, address),
>>>>>> +		.gfp_mask = GFP_KERNEL,
>>>>>> +		.page = page,
>>>>>> +	};
>>>>>> +	if (page->pgmap && page->pgmap->ops->migrate_to_ram)
>>>>>> +		return page->pgmap->ops->migrate_to_ram(&vmf);
>>>>> How does this synchronise against pgmap being released? As I understand things
>>>>> at this point we're not holding a reference on either the page or pgmap, so
>>>>> the page and therefore the pgmap may have been freed.
>>>>>
>>>>> I think a similar problem exists for device private fault handling as well and
>>>>> it has been on my list of things to fix for a while. I think the solution is to
>>>>> call try_get_page(), except it doesn't work with device pages due to the whole
>>>>> refcount thing. That issue is blocking a fair bit of work now so I've started
>>>>> looking into it.
>>>> At least the page should have been pinned by the __get_user_pages_locked
>>>> call in __gup_longterm_locked. That refcount is dropped in
>>>> check_and_migrate_movable_pages when it returns 0 or an error.
>>> Never mind. We unpin the pages first. Alex, would the migration work if
>>> we unpinned them afterwards? Also, the normal CPU page fault code path
>>> seems to make sure the page is locked (check in pfn_swap_entry_to_page)
>>> before calling migrate_to_ram.
> I don't think that's true. The check in pfn_swap_entry_to_page() is only for
> migration entries:
>
> 	BUG_ON(is_migration_entry(entry) && !PageLocked(p));
>
> As this is coherent memory though why do we have to call into a device driver
> to do the migration? Couldn't this all be done in the kernel?

I think you're right. I hadn't thought of that mainly because I'm even
less familiar with the non-device migration code. Alex, can you give
that a try? As long as the driver still gets a page-free callback when
the device page is freed, it should work.

Regards,
  Felix


>
>> No, you can not unpinned after migration. Due to the expected_count VS 
>> page_count condition at migrate_page_move_mapping, during migrate_page call.
>>
>> Regards,
>> Alex Sierra
>>
>>> Regards,
>>>    Felix
>>>
>>>
>
>
Alistair Popple Dec. 10, 2021, 1:31 a.m. UTC | #10
On Friday, 10 December 2021 3:54:31 AM AEDT Sierra Guiza, Alejandro (Alex) wrote:
> 
> On 12/9/2021 10:29 AM, Felix Kuehling wrote:
> > Am 2021-12-09 um 5:53 a.m. schrieb Alistair Popple:
> >> On Thursday, 9 December 2021 5:55:26 AM AEDT Sierra Guiza, Alejandro (Alex) wrote:
> >>> On 12/8/2021 11:30 AM, Felix Kuehling wrote:
> >>>> Am 2021-12-08 um 11:58 a.m. schrieb Felix Kuehling:
> >>>>> Am 2021-12-08 um 6:31 a.m. schrieb Alistair Popple:
> >>>>>> On Tuesday, 7 December 2021 5:52:43 AM AEDT Alex Sierra wrote:
> >>>>>>> Avoid long term pinning for Coherent device type pages. This could
> >>>>>>> interfere with their own device memory manager.
> >>>>>>> If caller tries to get user device coherent pages with PIN_LONGTERM flag
> >>>>>>> set, those pages will be migrated back to system memory.
> >>>>>>>
> >>>>>>> Signed-off-by: Alex Sierra<alex.sierra@amd.com>
> >>>>>>> ---
> >>>>>>>    mm/gup.c | 32 ++++++++++++++++++++++++++++++--
> >>>>>>>    1 file changed, 30 insertions(+), 2 deletions(-)
> >>>>>>>
> >>>>>>> diff --git a/mm/gup.c b/mm/gup.c
> >>>>>>> index 886d6148d3d0..1572eacf07f4 100644
> >>>>>>> --- a/mm/gup.c
> >>>>>>> +++ b/mm/gup.c
> >>>>>>> @@ -1689,17 +1689,37 @@ struct page *get_dump_page(unsigned long addr)
> >>>>>>>    #endif /* CONFIG_ELF_CORE */
> >>>>>>>    
> >>>>>>>    #ifdef CONFIG_MIGRATION
> >>>>>>> +static int migrate_device_page(unsigned long address,
> >>>>>>> +				struct page *page)
> >>>>>>> +{
> >>>>>>> +	struct vm_area_struct *vma = find_vma(current->mm, address);
> >>>>>>> +	struct vm_fault vmf = {
> >>>>>>> +		.vma = vma,
> >>>>>>> +		.address = address & PAGE_MASK,
> >>>>>>> +		.flags = FAULT_FLAG_USER,
> >>>>>>> +		.pgoff = linear_page_index(vma, address),
> >>>>>>> +		.gfp_mask = GFP_KERNEL,
> >>>>>>> +		.page = page,
> >>>>>>> +	};
> >>>>>>> +	if (page->pgmap && page->pgmap->ops->migrate_to_ram)
> >>>>>>> +		return page->pgmap->ops->migrate_to_ram(&vmf);
> >>>>>> How does this synchronise against pgmap being released? As I understand things
> >>>>>> at this point we're not holding a reference on either the page or pgmap, so
> >>>>>> the page and therefore the pgmap may have been freed.
> >>>>>>
> >>>>>> I think a similar problem exists for device private fault handling as well and
> >>>>>> it has been on my list of things to fix for a while. I think the solution is to
> >>>>>> call try_get_page(), except it doesn't work with device pages due to the whole
> >>>>>> refcount thing. That issue is blocking a fair bit of work now so I've started
> >>>>>> looking into it.
> >>>>> At least the page should have been pinned by the __get_user_pages_locked
> >>>>> call in __gup_longterm_locked. That refcount is dropped in
> >>>>> check_and_migrate_movable_pages when it returns 0 or an error.
> >>>> Never mind. We unpin the pages first. Alex, would the migration work if
> >>>> we unpinned them afterwards? Also, the normal CPU page fault code path
> >>>> seems to make sure the page is locked (check in pfn_swap_entry_to_page)
> >>>> before calling migrate_to_ram.
> >> I don't think that's true. The check in pfn_swap_entry_to_page() is only for
> >> migration entries:
> >>
> >> 	BUG_ON(is_migration_entry(entry) && !PageLocked(p));
> >>
> >> As this is coherent memory though why do we have to call into a device driver
> >> to do the migration? Couldn't this all be done in the kernel?
> > I think you're right. I hadn't thought of that mainly because I'm even
> > less familiar with the non-device migration code. Alex, can you give
> > that a try? As long as the driver still gets a page-free callback when
> > the device page is freed, it should work.

Yes, you should still get the page-free callback when the migration code drops
the last page reference.

> ACK. Will do.

There is currently no real support for migrating device pages based on
pfn. What I think is needed is something like migrate_pages(), but that
API won't work for a couple of reasons, the main one being that it relies
on pages being LRU pages.

I've been working on a series to implement an equivalent of migrate_pages() for
device-private (and by extension device-coherent) pages. It might also be useful
here, so I will try to get it posted as an RFC next week.
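
Something with roughly this shape, purely illustrative (no such interface
exists at the time of writing):

	/*
	 * Hypothetical: migrate the device pages identified by src_pfns
	 * back to system memory, without requiring them to be LRU pages.
	 */
	int migrate_device_pages(unsigned long *src_pfns, unsigned long npages);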

 - Alistair

> Alex Sierra
> 
> > Regards,
> >    Felix
> >
> >
> >>> No, you can not unpinned after migration. Due to the expected_count VS
> >>> page_count condition at migrate_page_move_mapping, during migrate_page call.
> >>>
> >>> Regards,
> >>> Alex Sierra
> >>>
> >>>> Regards,
> >>>>     Felix
> >>>>
> >>>>
> >>
Felix Kuehling Dec. 10, 2021, 4:39 p.m. UTC | #11
On 2021-12-09 8:31 p.m., Alistair Popple wrote:
> On Friday, 10 December 2021 3:54:31 AM AEDT Sierra Guiza, Alejandro (Alex) wrote:
>> On 12/9/2021 10:29 AM, Felix Kuehling wrote:
>>> Am 2021-12-09 um 5:53 a.m. schrieb Alistair Popple:
>>>> On Thursday, 9 December 2021 5:55:26 AM AEDT Sierra Guiza, Alejandro (Alex) wrote:
>>>>> On 12/8/2021 11:30 AM, Felix Kuehling wrote:
>>>>>> Am 2021-12-08 um 11:58 a.m. schrieb Felix Kuehling:
>>>>>>> Am 2021-12-08 um 6:31 a.m. schrieb Alistair Popple:
>>>>>>>> On Tuesday, 7 December 2021 5:52:43 AM AEDT Alex Sierra wrote:
>>>>>>>>> Avoid long term pinning for Coherent device type pages. This could
>>>>>>>>> interfere with their own device memory manager.
>>>>>>>>> If caller tries to get user device coherent pages with PIN_LONGTERM flag
>>>>>>>>> set, those pages will be migrated back to system memory.
>>>>>>>>>
>>>>>>>>> Signed-off-by: Alex Sierra<alex.sierra@amd.com>
>>>>>>>>> ---
>>>>>>>>>     mm/gup.c | 32 ++++++++++++++++++++++++++++++--
>>>>>>>>>     1 file changed, 30 insertions(+), 2 deletions(-)
>>>>>>>>>
>>>>>>>>> diff --git a/mm/gup.c b/mm/gup.c
>>>>>>>>> index 886d6148d3d0..1572eacf07f4 100644
>>>>>>>>> --- a/mm/gup.c
>>>>>>>>> +++ b/mm/gup.c
>>>>>>>>> @@ -1689,17 +1689,37 @@ struct page *get_dump_page(unsigned long addr)
>>>>>>>>>     #endif /* CONFIG_ELF_CORE */
>>>>>>>>>     
>>>>>>>>>     #ifdef CONFIG_MIGRATION
>>>>>>>>> +static int migrate_device_page(unsigned long address,
>>>>>>>>> +				struct page *page)
>>>>>>>>> +{
>>>>>>>>> +	struct vm_area_struct *vma = find_vma(current->mm, address);
>>>>>>>>> +	struct vm_fault vmf = {
>>>>>>>>> +		.vma = vma,
>>>>>>>>> +		.address = address & PAGE_MASK,
>>>>>>>>> +		.flags = FAULT_FLAG_USER,
>>>>>>>>> +		.pgoff = linear_page_index(vma, address),
>>>>>>>>> +		.gfp_mask = GFP_KERNEL,
>>>>>>>>> +		.page = page,
>>>>>>>>> +	};
>>>>>>>>> +	if (page->pgmap && page->pgmap->ops->migrate_to_ram)
>>>>>>>>> +		return page->pgmap->ops->migrate_to_ram(&vmf);
>>>>>>>> How does this synchronise against pgmap being released? As I understand things
>>>>>>>> at this point we're not holding a reference on either the page or pgmap, so
>>>>>>>> the page and therefore the pgmap may have been freed.
>>>>>>>>
>>>>>>>> I think a similar problem exists for device private fault handling as well and
>>>>>>>> it has been on my list of things to fix for a while. I think the solution is to
>>>>>>>> call try_get_page(), except it doesn't work with device pages due to the whole
>>>>>>>> refcount thing. That issue is blocking a fair bit of work now so I've started
>>>>>>>> looking into it.
>>>>>>> At least the page should have been pinned by the __get_user_pages_locked
>>>>>>> call in __gup_longterm_locked. That refcount is dropped in
>>>>>>> check_and_migrate_movable_pages when it returns 0 or an error.
>>>>>> Never mind. We unpin the pages first. Alex, would the migration work if
>>>>>> we unpinned them afterwards? Also, the normal CPU page fault code path
>>>>>> seems to make sure the page is locked (check in pfn_swap_entry_to_page)
>>>>>> before calling migrate_to_ram.
>>>> I don't think that's true. The check in pfn_swap_entry_to_page() is only for
>>>> migration entries:
>>>>
>>>> 	BUG_ON(is_migration_entry(entry) && !PageLocked(p));
>>>>
>>>> As this is coherent memory though why do we have to call into a device driver
>>>> to do the migration? Couldn't this all be done in the kernel?
>>> I think you're right. I hadn't thought of that mainly because I'm even
>>> less familiar with the non-device migration code. Alex, can you give
>>> that a try? As long as the driver still gets a page-free callback when
>>> the device page is freed, it should work.
> Yes, you should still get the page-free callback when the migration code drops
> the last page reference.
>
>> ACK. Will do.
> There is currently not really any support for migrating device pages based on
> pfn. What I think is needed is something like migrate_pages(), but that API
> won't work for a couple of reasons - main one being that it relies on pages
> being LRU pages.
>
> I've been working on a series to implement an equivalent of migrate_pages() for
> device-private (and by extension device-coherent) pages. It might also be useful
> here so I will try and get it posted as an RFC next week.
If we want to make progress on this patch series in the shorter term, we 
could just fail get_user_pages with FOLL_LONGTERM for DEVICE_COHERENT 
pages. Then add the migration support when your patch series is ready.
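
A minimal sketch of that interim behaviour, assuming the check sits in the
scan loop of check_and_migrate_movable_pages() as in this patch (the
function is only reached for long-term pins via __gup_longterm_locked):

	if (is_device_page(head)) {
		/* Drop every pin taken so far and refuse the request. */
		if (gup_flags & FOLL_PIN)
			unpin_user_pages(pages, nr_pages);
		else
			while (nr_pages--)
				put_page(pages[nr_pages]);
		return -EOPNOTSUPP;
	}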

Regards,
   Felix


>
>   - Alistair
>
>> Alex Sierra
>>
>>> Regards,
>>>     Felix
>>>
>>>
>>>>> No, you can not unpinned after migration. Due to the expected_count VS
>>>>> page_count condition at migrate_page_move_mapping, during migrate_page call.
>>>>>
>>>>> Regards,
>>>>> Alex Sierra
>>>>>
>>>>>> Regards,
>>>>>>      Felix
>>>>>>
>>>>>>
>
>

Patch

diff --git a/mm/gup.c b/mm/gup.c
index 886d6148d3d0..1572eacf07f4 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1689,17 +1689,37 @@  struct page *get_dump_page(unsigned long addr)
 #endif /* CONFIG_ELF_CORE */
 
 #ifdef CONFIG_MIGRATION
+static int migrate_device_page(unsigned long address,
+				struct page *page)
+{
+	struct vm_area_struct *vma = find_vma(current->mm, address);
+	struct vm_fault vmf = {
+		.vma = vma,
+		.address = address & PAGE_MASK,
+		.flags = FAULT_FLAG_USER,
+		.pgoff = linear_page_index(vma, address),
+		.gfp_mask = GFP_KERNEL,
+		.page = page,
+	};
+	if (page->pgmap && page->pgmap->ops->migrate_to_ram)
+		return page->pgmap->ops->migrate_to_ram(&vmf);
+
+	return -EBUSY;
+}
+
 /*
  * Check whether all pages are pinnable, if so return number of pages.  If some
  * pages are not pinnable, migrate them, and unpin all pages. Return zero if
  * pages were migrated, or if some pages were not successfully isolated.
  * Return negative error if migration fails.
  */
-static long check_and_migrate_movable_pages(unsigned long nr_pages,
+static long check_and_migrate_movable_pages(unsigned long start,
+					    unsigned long nr_pages,
 					    struct page **pages,
 					    unsigned int gup_flags)
 {
 	unsigned long i;
+	unsigned long page_index;
 	unsigned long isolation_error_count = 0;
 	bool drain_allow = true;
 	LIST_HEAD(movable_page_list);
@@ -1720,6 +1740,10 @@  static long check_and_migrate_movable_pages(unsigned long nr_pages,
 		 * If we get a movable page, since we are going to be pinning
 		 * these entries, try to move them out if possible.
 		 */
+		if (is_device_page(head)) {
+			page_index = i;
+			goto unpin_pages;
+		}
 		if (!is_pinnable_page(head)) {
 			if (PageHuge(head)) {
 				if (!isolate_huge_page(head, &movable_page_list))
@@ -1750,12 +1774,16 @@  static long check_and_migrate_movable_pages(unsigned long nr_pages,
 	if (list_empty(&movable_page_list) && !isolation_error_count)
 		return nr_pages;
 
+unpin_pages:
 	if (gup_flags & FOLL_PIN) {
 		unpin_user_pages(pages, nr_pages);
 	} else {
 		for (i = 0; i < nr_pages; i++)
 			put_page(pages[i]);
 	}
+	if (is_device_page(head))
+		return migrate_device_page(start + page_index * PAGE_SIZE, head);
+
 	if (!list_empty(&movable_page_list)) {
 		ret = migrate_pages(&movable_page_list, alloc_migration_target,
 				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
@@ -1798,7 +1826,7 @@  static long __gup_longterm_locked(struct mm_struct *mm,
 					     NULL, gup_flags);
 		if (rc <= 0)
 			break;
-		rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
+		rc = check_and_migrate_movable_pages(start, rc, pages, gup_flags);
 	} while (!rc);
 	memalloc_pin_restore(flags);