
[v5,03/29] vfio: powerpc/spapr: Check that TCE page size is equal to it_page_size

Message ID 1425910045-26167-4-git-send-email-aik@ozlabs.ru (mailing list archive)
State Superseded
Delegated to: Benjamin Herrenschmidt

Commit Message

Alexey Kardashevskiy March 9, 2015, 2:06 p.m. UTC
This checks that the TCE table page size is not bigger than the size of
the page we have just pinned and whose physical address we are about to
put into the table.

Otherwise the hardware gets unwanted access to the physical memory between
the end of the actual page and the end of the aligned-up TCE page.

Since compound_order() and compound_head() work correctly on non-huge
pages, there is no need for an additional check of whether the page is huge.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
---
Changes:
v4:
* s/tce_check_page_size/tce_page_is_contained/
---
 drivers/vfio/vfio_iommu_spapr_tce.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
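
To make the failure mode concrete, here is a minimal userspace sketch of the
same containment arithmetic. The 4KB system page (PAGE_SHIFT = 12), the 64KB
IOMMU page (it_page_shift = 16) and the order-12 huge page are assumed example
values for illustration, not figures taken from the patch.

/* Illustrative sketch only (userspace, not kernel code): mirrors the
 * tce_page_is_contained() arithmetic with assumed example sizes. */
#include <stdbool.h>
#include <stdio.h>

static bool contained(unsigned sys_page_shift, unsigned order,
		      unsigned it_page_shift)
{
	/* Size of the pinned (possibly compound) page vs. the TCE page. */
	return sys_page_shift + order >= it_page_shift;
}

int main(void)
{
	/* 4KB system pages, 64KB TCE pages (assumed example values). */
	unsigned sys_page_shift = 12, it_page_shift = 16;

	/* A plain order-0 page is only 4KB: mapping it into a 64KB TCE
	 * entry would expose the remaining 60KB to the device, so the
	 * check fails and the driver returns -EPERM. */
	printf("order 0:  %s\n", contained(sys_page_shift, 0, it_page_shift)
	       ? "contained" : "not contained");

	/* A 16MB huge page (order 12 with 4KB base pages) easily covers
	 * a 64KB TCE page, so the check passes. */
	printf("order 12: %s\n", contained(sys_page_shift, 12, it_page_shift)
	       ? "contained" : "not contained");
	return 0;
}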

Comments

Alex Williamson March 10, 2015, 7:56 p.m. UTC | #1
On Tue, 2015-03-10 at 01:06 +1100, Alexey Kardashevskiy wrote:
> This checks that the TCE table page size is not bigger that the size of
> a page we just pinned and going to put its physical address to the table.
> 
> Otherwise the hardware gets unwanted access to physical memory between
> the end of the actual page and the end of the aligned up TCE page.
> 
> Since compound_order() and compound_head() work correctly on non-huge
> pages, there is no need for additional check whether the page is huge.
> 
> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> ---
> Changes:
> v4:
> * s/tce_check_page_size/tce_page_is_contained/
> ---
>  drivers/vfio/vfio_iommu_spapr_tce.c | 22 ++++++++++++++++++++++
>  1 file changed, 22 insertions(+)
> 
> diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
> index 756831f..91e7599 100644
> --- a/drivers/vfio/vfio_iommu_spapr_tce.c
> +++ b/drivers/vfio/vfio_iommu_spapr_tce.c
> @@ -49,6 +49,22 @@ struct tce_container {
>  	bool enabled;
>  };
>  
> +static bool tce_page_is_contained(struct page *page, unsigned page_shift)
> +{
> +	unsigned shift;
> +
> +	/*
> +	 * Check that the TCE table granularity is not bigger than the size of
> +	 * a page we just found. Otherwise the hardware can get access to
> +	 * a bigger memory chunk that it should.
> +	 */
> +	shift = PAGE_SHIFT + compound_order(compound_head(page));
> +	if (shift >= page_shift)
> +		return true;
> +
> +	return false;

nit, simplified:

return (PAGE_SHIFT + compound_order(compound_head(page) >= page_shift);

> +}
> +
>  static int tce_iommu_enable(struct tce_container *container)
>  {
>  	int ret = 0;
> @@ -197,6 +213,12 @@ static long tce_iommu_build(struct tce_container *container,
>  			ret = -EFAULT;
>  			break;
>  		}
> +
> +		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
> +			ret = -EPERM;
> +			break;
> +		}
> +
>  		hva = (unsigned long) page_address(page) +
>  			(tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK);
>
Alexey Kardashevskiy March 10, 2015, 10:57 p.m. UTC | #2
On 03/11/2015 06:56 AM, Alex Williamson wrote:
> On Tue, 2015-03-10 at 01:06 +1100, Alexey Kardashevskiy wrote:
>> This checks that the TCE table page size is not bigger that the size of
>> a page we just pinned and going to put its physical address to the table.
>>
>> Otherwise the hardware gets unwanted access to physical memory between
>> the end of the actual page and the end of the aligned up TCE page.
>>
>> Since compound_order() and compound_head() work correctly on non-huge
>> pages, there is no need for additional check whether the page is huge.
>>
>> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
>> ---
>> Changes:
>> v4:
>> * s/tce_check_page_size/tce_page_is_contained/
>> ---
>>   drivers/vfio/vfio_iommu_spapr_tce.c | 22 ++++++++++++++++++++++
>>   1 file changed, 22 insertions(+)
>>
>> diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
>> index 756831f..91e7599 100644
>> --- a/drivers/vfio/vfio_iommu_spapr_tce.c
>> +++ b/drivers/vfio/vfio_iommu_spapr_tce.c
>> @@ -49,6 +49,22 @@ struct tce_container {
>>   	bool enabled;
>>   };
>>
>> +static bool tce_page_is_contained(struct page *page, unsigned page_shift)
>> +{
>> +	unsigned shift;
>> +
>> +	/*
>> +	 * Check that the TCE table granularity is not bigger than the size of
>> +	 * a page we just found. Otherwise the hardware can get access to
>> +	 * a bigger memory chunk that it should.
>> +	 */
>> +	shift = PAGE_SHIFT + compound_order(compound_head(page));
>> +	if (shift >= page_shift)
>> +		return true;
>> +
>> +	return false;
>
> nit, simplified:
>
> return (PAGE_SHIFT + compound_order(compound_head(page) >= page_shift);

This won't be "bool" though. This will (I'll do this)

shift = PAGE_SHIFT + compound_order(compound_head(page));
return (shift >= page_shift);




>
>> +}
>> +
>>   static int tce_iommu_enable(struct tce_container *container)
>>   {
>>   	int ret = 0;
>> @@ -197,6 +213,12 @@ static long tce_iommu_build(struct tce_container *container,
>>   			ret = -EFAULT;
>>   			break;
>>   		}
>> +
>> +		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
>> +			ret = -EPERM;
>> +			break;
>> +		}
>> +
>>   		hva = (unsigned long) page_address(page) +
>>   			(tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK);
>>
>
>
>
Alex Williamson March 10, 2015, 11:03 p.m. UTC | #3
On Wed, 2015-03-11 at 09:57 +1100, Alexey Kardashevskiy wrote:
> On 03/11/2015 06:56 AM, Alex Williamson wrote:
> > On Tue, 2015-03-10 at 01:06 +1100, Alexey Kardashevskiy wrote:
> >> This checks that the TCE table page size is not bigger that the size of
> >> a page we just pinned and going to put its physical address to the table.
> >>
> >> Otherwise the hardware gets unwanted access to physical memory between
> >> the end of the actual page and the end of the aligned up TCE page.
> >>
> >> Since compound_order() and compound_head() work correctly on non-huge
> >> pages, there is no need for additional check whether the page is huge.
> >>
> >> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> >> ---
> >> Changes:
> >> v4:
> >> * s/tce_check_page_size/tce_page_is_contained/
> >> ---
> >>   drivers/vfio/vfio_iommu_spapr_tce.c | 22 ++++++++++++++++++++++
> >>   1 file changed, 22 insertions(+)
> >>
> >> diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
> >> index 756831f..91e7599 100644
> >> --- a/drivers/vfio/vfio_iommu_spapr_tce.c
> >> +++ b/drivers/vfio/vfio_iommu_spapr_tce.c
> >> @@ -49,6 +49,22 @@ struct tce_container {
> >>   	bool enabled;
> >>   };
> >>
> >> +static bool tce_page_is_contained(struct page *page, unsigned page_shift)
> >> +{
> >> +	unsigned shift;
> >> +
> >> +	/*
> >> +	 * Check that the TCE table granularity is not bigger than the size of
> >> +	 * a page we just found. Otherwise the hardware can get access to
> >> +	 * a bigger memory chunk that it should.
> >> +	 */
> >> +	shift = PAGE_SHIFT + compound_order(compound_head(page));
> >> +	if (shift >= page_shift)
> >> +		return true;
> >> +
> >> +	return false;
> >
> > nit, simplified:
> >
> > return (PAGE_SHIFT + compound_order(compound_head(page) >= page_shift);
> 
> This won't be "bool" though.

Yes, it will.

>  This will (I'll do this)
> 
> shift = PAGE_SHIFT + compound_order(compound_head(page));
> return (shift >= page_shift);
> 
> 
> 
> 
> >
> >> +}
> >> +
> >>   static int tce_iommu_enable(struct tce_container *container)
> >>   {
> >>   	int ret = 0;
> >> @@ -197,6 +213,12 @@ static long tce_iommu_build(struct tce_container *container,
> >>   			ret = -EFAULT;
> >>   			break;
> >>   		}
> >> +
> >> +		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
> >> +			ret = -EPERM;
> >> +			break;
> >> +		}
> >> +
> >>   		hva = (unsigned long) page_address(page) +
> >>   			(tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK);
> >>
> >
> >
> >
> 
>
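
A side note on the "bool" question: in C a relational expression such as
shift >= page_shift evaluates to an int 0 or 1, which converts losslessly to
bool, so returning the comparison directly has the same value as the
two-statement form. A tiny illustrative sketch, not code taken from the patch:

/* Illustrative sketch only: a >= comparison returned through a bool
 * function behaves exactly like the explicit if/return version. */
#include <stdbool.h>
#include <stdio.h>

static bool one_liner(unsigned shift, unsigned page_shift)
{
	return shift >= page_shift;	/* int 0/1, converted to bool */
}

static bool two_statements(unsigned shift, unsigned page_shift)
{
	if (shift >= page_shift)
		return true;
	return false;
}

int main(void)
{
	printf("%d %d\n", one_liner(24, 16), two_statements(24, 16));
	printf("%d %d\n", one_liner(12, 16), two_statements(12, 16));
	return 0;
}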
Benjamin Herrenschmidt March 10, 2015, 11:14 p.m. UTC | #4
On Tue, 2015-03-10 at 17:03 -0600, Alex Williamson wrote:
> > > return (PAGE_SHIFT + compound_order(compound_head(page) >= page_shift);
> > 
> > This won't be "bool" though.
> 
> Yes, it will.

Don't you have your parenthesis in the wrong place, Alex ? :-)

> >  This will (I'll do this)
> > 
> > shift = PAGE_SHIFT + compound_order(compound_head(page));
> > return (shift >= page_shift);
Alex Williamson March 10, 2015, 11:34 p.m. UTC | #5
On Wed, 2015-03-11 at 10:14 +1100, Benjamin Herrenschmidt wrote:
> On Tue, 2015-03-10 at 17:03 -0600, Alex Williamson wrote:
> > > > return (PAGE_SHIFT + compound_order(compound_head(page) >= page_shift);
> > > 
> > > This won't be "bool" though.
> > 
> > Yes, it will.
> 
> Don't you have your parenthesis in the wrong place, Alex ? :-)

Probably, but the compiler will warn about that.

> > >  This will (I'll do this)
> > > 
> > > shift = PAGE_SHIFT + compound_order(compound_head(page));
> > > return (shift >= page_shift);
> 
>
Alexey Kardashevskiy March 10, 2015, 11:45 p.m. UTC | #6
On 03/11/2015 10:03 AM, Alex Williamson wrote:
> On Wed, 2015-03-11 at 09:57 +1100, Alexey Kardashevskiy wrote:
>> On 03/11/2015 06:56 AM, Alex Williamson wrote:
>>> On Tue, 2015-03-10 at 01:06 +1100, Alexey Kardashevskiy wrote:
>>>> This checks that the TCE table page size is not bigger that the size of
>>>> a page we just pinned and going to put its physical address to the table.
>>>>
>>>> Otherwise the hardware gets unwanted access to physical memory between
>>>> the end of the actual page and the end of the aligned up TCE page.
>>>>
>>>> Since compound_order() and compound_head() work correctly on non-huge
>>>> pages, there is no need for additional check whether the page is huge.
>>>>
>>>> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
>>>> ---
>>>> Changes:
>>>> v4:
>>>> * s/tce_check_page_size/tce_page_is_contained/
>>>> ---
>>>>    drivers/vfio/vfio_iommu_spapr_tce.c | 22 ++++++++++++++++++++++
>>>>    1 file changed, 22 insertions(+)
>>>>
>>>> diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
>>>> index 756831f..91e7599 100644
>>>> --- a/drivers/vfio/vfio_iommu_spapr_tce.c
>>>> +++ b/drivers/vfio/vfio_iommu_spapr_tce.c
>>>> @@ -49,6 +49,22 @@ struct tce_container {
>>>>    	bool enabled;
>>>>    };
>>>>
>>>> +static bool tce_page_is_contained(struct page *page, unsigned page_shift)
>>>> +{
>>>> +	unsigned shift;
>>>> +
>>>> +	/*
>>>> +	 * Check that the TCE table granularity is not bigger than the size of
>>>> +	 * a page we just found. Otherwise the hardware can get access to
>>>> +	 * a bigger memory chunk that it should.
>>>> +	 */
>>>> +	shift = PAGE_SHIFT + compound_order(compound_head(page));
>>>> +	if (shift >= page_shift)
>>>> +		return true;
>>>> +
>>>> +	return false;
>>>
>>> nit, simplified:
>>>
>>> return (PAGE_SHIFT + compound_order(compound_head(page) >= page_shift);
>>
>> This won't be "bool" though.
>
> Yes, it will.

Ah, misread as "... - page_shift". And you missed one bracket :)


>
>>   This will (I'll do this)
>>
>> shift = PAGE_SHIFT + compound_order(compound_head(page));
>> return (shift >= page_shift);
>>
>>
>>
>>
>>>
>>>> +}
>>>> +
>>>>    static int tce_iommu_enable(struct tce_container *container)
>>>>    {
>>>>    	int ret = 0;
>>>> @@ -197,6 +213,12 @@ static long tce_iommu_build(struct tce_container *container,
>>>>    			ret = -EFAULT;
>>>>    			break;
>>>>    		}
>>>> +
>>>> +		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
>>>> +			ret = -EPERM;
>>>> +			break;
>>>> +		}
>>>> +
>>>>    		hva = (unsigned long) page_address(page) +
>>>>    			(tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK);
>>>>
>>>
>>>
>>>
>>
>>
>
>
>
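
Pulling the thread together, a minimal sketch of the helper in the form the
discussion converges on (the if/return pair collapsed into a single
comparison), assuming the usual linux/mm.h definitions of compound_head(),
compound_order() and PAGE_SHIFT; this is illustrative and may not match the
version that was eventually merged:

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	unsigned shift;

	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	shift = PAGE_SHIFT + compound_order(compound_head(page));
	return shift >= page_shift;
}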

Patch

diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 756831f..91e7599 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -49,6 +49,22 @@ struct tce_container {
 	bool enabled;
 };
 
+static bool tce_page_is_contained(struct page *page, unsigned page_shift)
+{
+	unsigned shift;
+
+	/*
+	 * Check that the TCE table granularity is not bigger than the size of
+	 * a page we just found. Otherwise the hardware can get access to
+	 * a bigger memory chunk than it should.
+	 */
+	shift = PAGE_SHIFT + compound_order(compound_head(page));
+	if (shift >= page_shift)
+		return true;
+
+	return false;
+}
+
 static int tce_iommu_enable(struct tce_container *container)
 {
 	int ret = 0;
@@ -197,6 +213,12 @@ static long tce_iommu_build(struct tce_container *container,
 			ret = -EFAULT;
 			break;
 		}
+
+		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
+			ret = -EPERM;
+			break;
+		}
+
 		hva = (unsigned long) page_address(page) +
 			(tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK);