Patchwork spapr: Add "memop" hypercall

login
register
mail settings
Submitter Benjamin Herrenschmidt
Date May 21, 2012, 7:24 a.m.
Message ID <1337585042.2779.4.camel@pasglop>
Download mbox | patch
Permalink /patch/160329/
State New
Headers show

Comments

Benjamin Herrenschmidt - May 21, 2012, 7:24 a.m.
This adds a kvm-specific hypervisor call to the pseries machine
which makes it possible to do what amounts to memmove, memcpy and xor
over regions of physical memory such as the framebuffer.

This is the simplest way to get usable framebuffer speed from
SLOF since the framebuffer isn't mapped in the VRMA and so would
otherwise require an hcall per 8 bytes access.

The performance is still not great but usable, and can be improved
with a more complex implementation of the hcall itself if needed.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---
 hw/spapr.h       |    3 ++-
 hw/spapr_hcall.c |   49 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 51 insertions(+), 1 deletion(-)



--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Alexander Graf - May 21, 2012, 8:38 a.m.
On 21.05.2012, at 09:24, Benjamin Herrenschmidt wrote:

> This adds a kvm-specific hypervisor call to the pseries machine
> which allows to do what amounts to memmove, memcpy and xor over
> regions of physical memory such as the framebuffer.
> 
> This is the simplest way to get usable framebuffer speed from
> SLOF since the framebuffer isn't mapped in the VRMA and so would
> otherwise require an hcall per 8 bytes access.
> 
> The performance is still not great but usable, and can be improved
> with a more complex implementation of the hcall itself if needed.

Shouldn't we try and implement the same thing in QEMU as well to make things compatible? Also, what would the downside be of always going through QEMU for this hypercall?

The reason I'm asking is that we might want to do memmove,memcpy,xor on MMIO memory, which then user space could easily do, but which incurs quite some headaches to do it from inside KVM.


Alex

> 
> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
> ---
> hw/spapr.h       |    3 ++-
> hw/spapr_hcall.c |   49 +++++++++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 51 insertions(+), 1 deletion(-)
> 
> diff --git a/hw/spapr.h b/hw/spapr.h
> index 7c497aa..0343f33 100644
> --- a/hw/spapr.h
> +++ b/hw/spapr.h
> @@ -264,7 +264,8 @@ typedef struct sPAPREnvironment {
>  */
> #define KVMPPC_HCALL_BASE       0xf000
> #define KVMPPC_H_RTAS           (KVMPPC_HCALL_BASE + 0x0)
> -#define KVMPPC_HCALL_MAX        KVMPPC_H_RTAS
> +#define KVMPPC_H_LOGICAL_MEMOP  (KVMPPC_HCALL_BASE + 0x1)
> +#define KVMPPC_HCALL_MAX        KVMPPC_H_LOGICAL_MEMOP
> 
> extern sPAPREnvironment *spapr;
> 
> diff --git a/hw/spapr_hcall.c b/hw/spapr_hcall.c
> index 94bb504..c5c26dc 100644
> --- a/hw/spapr_hcall.c
> +++ b/hw/spapr_hcall.c
> @@ -608,6 +608,54 @@ static target_ulong h_logical_store(CPUPPCState *env, sPAPREnvironment *spapr,
>     return H_PARAMETER;
> }
> 
> +static target_ulong h_logical_memop(CPUPPCState *env, sPAPREnvironment *spapr,
> +				    target_ulong opcode, target_ulong *args)
> +{
> +    target_ulong dst   = args[0]; /* Destination address */
> +    target_ulong src   = args[1]; /* Source address */
> +    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
> +    target_ulong count = args[3]; /* Element count */
> +    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
> +    uint64_t tmp;
> +    unsigned int mask = (1 << esize) - 1;
> +    int step = 1 << esize;
> +
> +    if (count > 0x80000000)
> +	return H_PARAMETER;
> +
> +    if ((dst & mask) || (src & mask))
> +	return H_PARAMETER;
> +
> +    if (dst >= src && dst < (src + (count << esize))) {
> +	    dst = dst + ((count - 1) << esize);
> +	    src = src + ((count - 1) << esize);
> +	    step = -step;
> +    }
> +
> +    while(count--) {
> +        switch (esize) {
> +        case 0: tmp = ldub_phys(src); break;
> +        case 1: tmp = lduw_phys(src); break;
> +        case 2: tmp = ldl_phys(src);  break;
> +        case 3: tmp = ldq_phys(src);  break;
> +        default:
> +        return H_PARAMETER;
> +	}
> +        if (op)
> +		tmp = ~tmp;
> +        switch (esize) {
> +        case 0: stb_phys(dst, tmp); break;
> +        case 1: stw_phys(dst, tmp); break;
> +        case 2: stl_phys(dst, tmp); break;
> +        case 3: stq_phys(dst, tmp); break;
> +	}
> +	dst = dst + step;
> +	src = src + step;
> +    }
> +
> +    return H_SUCCESS;
> +}
> +
> static target_ulong h_logical_icbi(CPUPPCState *env, sPAPREnvironment *spapr,
>                                    target_ulong opcode, target_ulong *args)
> {
> @@ -700,6 +748,7 @@ static void hypercall_register_types(void)
>     spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
>     spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
>     spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
> +    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);
> 
>     /* qemu/KVM-PPC specific hcalls */
>     spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
> 
> 
> --
> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Benjamin Herrenschmidt - May 21, 2012, 8:59 a.m.
On Mon, 2012-05-21 at 10:38 +0200, Alexander Graf wrote:
> On 21.05.2012, at 09:24, Benjamin Herrenschmidt wrote:
> 
> > This adds a kvm-specific hypervisor call to the pseries machine
> > which allows to do what amounts to memmove, memcpy and xor over
> > regions of physical memory such as the framebuffer.
> > 
> > This is the simplest way to get usable framebuffer speed from
> > SLOF since the framebuffer isn't mapped in the VRMA and so would
> > otherwise require an hcall per 8 bytes access.
> > 
> > The performance is still not great but usable, and can be improved
> > with a more complex implementation of the hcall itself if needed.
> 
> Shouldn't we try and implement the same thing in QEMU as well to make
> things compatible? Also, what would the downside be of always going
> through QEMU for this hypercall?
> 
> The reason I'm asking is that we might want to do memmove,memcpy,xor
> on MMIO memory, which then user space could easily do, but which
> incurs quite some headaches to do it from inside KVM.

I don't understand your question ... this is implemented in qemu...

The problem with SLOF is specific to -M pseries, because it runs in
"guest" real mode, it doesn't have access to device memory unless it
does it via hcalls.

Cheers,
Ben.

> 
> Alex
> 
> > 
> > Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
> > ---
> > hw/spapr.h       |    3 ++-
> > hw/spapr_hcall.c |   49 +++++++++++++++++++++++++++++++++++++++++++++++++
> > 2 files changed, 51 insertions(+), 1 deletion(-)
> > 
> > diff --git a/hw/spapr.h b/hw/spapr.h
> > index 7c497aa..0343f33 100644
> > --- a/hw/spapr.h
> > +++ b/hw/spapr.h
> > @@ -264,7 +264,8 @@ typedef struct sPAPREnvironment {
> >  */
> > #define KVMPPC_HCALL_BASE       0xf000
> > #define KVMPPC_H_RTAS           (KVMPPC_HCALL_BASE + 0x0)
> > -#define KVMPPC_HCALL_MAX        KVMPPC_H_RTAS
> > +#define KVMPPC_H_LOGICAL_MEMOP  (KVMPPC_HCALL_BASE + 0x1)
> > +#define KVMPPC_HCALL_MAX        KVMPPC_H_LOGICAL_MEMOP
> > 
> > extern sPAPREnvironment *spapr;
> > 
> > diff --git a/hw/spapr_hcall.c b/hw/spapr_hcall.c
> > index 94bb504..c5c26dc 100644
> > --- a/hw/spapr_hcall.c
> > +++ b/hw/spapr_hcall.c
> > @@ -608,6 +608,54 @@ static target_ulong h_logical_store(CPUPPCState *env, sPAPREnvironment *spapr,
> >     return H_PARAMETER;
> > }
> > 
> > +static target_ulong h_logical_memop(CPUPPCState *env, sPAPREnvironment *spapr,
> > +				    target_ulong opcode, target_ulong *args)
> > +{
> > +    target_ulong dst   = args[0]; /* Destination address */
> > +    target_ulong src   = args[1]; /* Source address */
> > +    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
> > +    target_ulong count = args[3]; /* Element count */
> > +    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
> > +    uint64_t tmp;
> > +    unsigned int mask = (1 << esize) - 1;
> > +    int step = 1 << esize;
> > +
> > +    if (count > 0x80000000)
> > +	return H_PARAMETER;
> > +
> > +    if ((dst & mask) || (src & mask))
> > +	return H_PARAMETER;
> > +
> > +    if (dst >= src && dst < (src + (count << esize))) {
> > +	    dst = dst + ((count - 1) << esize);
> > +	    src = src + ((count - 1) << esize);
> > +	    step = -step;
> > +    }
> > +
> > +    while(count--) {
> > +        switch (esize) {
> > +        case 0: tmp = ldub_phys(src); break;
> > +        case 1: tmp = lduw_phys(src); break;
> > +        case 2: tmp = ldl_phys(src);  break;
> > +        case 3: tmp = ldq_phys(src);  break;
> > +        default:
> > +        return H_PARAMETER;
> > +	}
> > +        if (op)
> > +		tmp = ~tmp;
> > +        switch (esize) {
> > +        case 0: stb_phys(dst, tmp); break;
> > +        case 1: stw_phys(dst, tmp); break;
> > +        case 2: stl_phys(dst, tmp); break;
> > +        case 3: stq_phys(dst, tmp); break;
> > +	}
> > +	dst = dst + step;
> > +	src = src + step;
> > +    }
> > +
> > +    return H_SUCCESS;
> > +}
> > +
> > static target_ulong h_logical_icbi(CPUPPCState *env, sPAPREnvironment *spapr,
> >                                    target_ulong opcode, target_ulong *args)
> > {
> > @@ -700,6 +748,7 @@ static void hypercall_register_types(void)
> >     spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
> >     spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
> >     spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
> > +    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);
> > 
> >     /* qemu/KVM-PPC specific hcalls */
> >     spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
> > 
> > 
> > --
> > To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
> > the body of a message to majordomo@vger.kernel.org
> > More majordomo info at  http://vger.kernel.org/majordomo-info.html


--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Alexander Graf - May 21, 2012, 9:06 a.m.
On 21.05.2012, at 10:59, Benjamin Herrenschmidt wrote:

> On Mon, 2012-05-21 at 10:38 +0200, Alexander Graf wrote:
>> On 21.05.2012, at 09:24, Benjamin Herrenschmidt wrote:
>> 
>>> This adds a kvm-specific hypervisor call to the pseries machine
>>> which allows to do what amounts to memmove, memcpy and xor over
>>> regions of physical memory such as the framebuffer.
>>> 
>>> This is the simplest way to get usable framebuffer speed from
>>> SLOF since the framebuffer isn't mapped in the VRMA and so would
>>> otherwise require an hcall per 8 bytes access.
>>> 
>>> The performance is still not great but usable, and can be improved
>>> with a more complex implementation of the hcall itself if needed.
>> 
>> Shouldn't we try and implement the same thing in QEMU as well to make
>> things compatible? Also, what would the downside be of always going
>> through QEMU for this hypercall?
>> 
>> The reason I'm asking is that we might want to do memmove,memcpy,xor
>> on MMIO memory, which then user space could easily do, but which
>> incurs quite some headaches to do it from inside KVM.
> 
> I don't understand your question ... this is implemented in qemu...
> 
> The problem with SLOF is specific to -M pseries, because it runs in
> "guest" real mode, it doesn't have access to device memory unless it
> does it via hcalls.

Yikes. Call me stupid.


Alex

--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Avi Kivity - May 21, 2012, 9:47 a.m.
On 05/21/2012 10:24 AM, Benjamin Herrenschmidt wrote:
> This adds a kvm-specific hypervisor call to the pseries machine
> which allows to do what amounts to memmove, memcpy and xor over
> regions of physical memory such as the framebuffer.
>
> This is the simplest way to get usable framebuffer speed from
> SLOF since the framebuffer isn't mapped in the VRMA and so would
> otherwise require an hcall per 8 bytes access.
>
> The performance is still not great but usable, and can be improved
> with a more complex implementation of the hcall itself if needed.
>
> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
> ---
>  hw/spapr.h       |    3 ++-
>  hw/spapr_hcall.c |   49 +++++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 51 insertions(+), 1 deletion(-)
>

Shouldn't these be documented somewhere?
Benjamin Herrenschmidt - May 21, 2012, 10:04 a.m.
On Mon, 2012-05-21 at 12:47 +0300, Avi Kivity wrote:
> On 05/21/2012 10:24 AM, Benjamin Herrenschmidt wrote:
> > This adds a kvm-specific hypervisor call to the pseries machine
> > which allows to do what amounts to memmove, memcpy and xor over
> > regions of physical memory such as the framebuffer.
> >
> > This is the simplest way to get usable framebuffer speed from
> > SLOF since the framebuffer isn't mapped in the VRMA and so would
> > otherwise require an hcall per 8 bytes access.
> >
> > The performance is still not great but usable, and can be improved
> > with a more complex implementation of the hcall itself if needed.
> >
> > Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
> > ---
> >  hw/spapr.h       |    3 ++-
> >  hw/spapr_hcall.c |   49 +++++++++++++++++++++++++++++++++++++++++++++++++
> >  2 files changed, 51 insertions(+), 1 deletion(-)
> >
> 
> Shouldn't these be documented somewhere?

Hrm, that's a good point. So far we've been mostly implementing the PAPR
spec so the documentation exists.

Before that patch we only had one "special" hcall not in PAPR, which we
use for the RTAS firmware calls (this part of the FW normally lives
inside the guest on real pHyp and communicates with the hypervisor using
private hcalls, on qemu, we just turn all the RTAS calls to qemu via a
single H_RTAS multiplexer). We haven't documented it.

Now I'm adding another one, so yes, it's looking like a trend :-) I'll
look into it, at this stage with only those two, adding some comments in
the header might be plenty enough.

Cheers,
Ben.


--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Avi Kivity - May 21, 2012, 10:07 a.m.
On 05/21/2012 01:04 PM, Benjamin Herrenschmidt wrote:
> On Mon, 2012-05-21 at 12:47 +0300, Avi Kivity wrote:
> > On 05/21/2012 10:24 AM, Benjamin Herrenschmidt wrote:
> > > This adds a kvm-specific hypervisor call to the pseries machine
> > > which allows to do what amounts to memmove, memcpy and xor over
> > > regions of physical memory such as the framebuffer.
> > >
> > > This is the simplest way to get usable framebuffer speed from
> > > SLOF since the framebuffer isn't mapped in the VRMA and so would
> > > otherwise require an hcall per 8 bytes access.
> > >
> > > The performance is still not great but usable, and can be improved
> > > with a more complex implementation of the hcall itself if needed.
> > >
> > > Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
> > > ---
> > >  hw/spapr.h       |    3 ++-
> > >  hw/spapr_hcall.c |   49 +++++++++++++++++++++++++++++++++++++++++++++++++
> > >  2 files changed, 51 insertions(+), 1 deletion(-)
> > >
> > 
> > Shouldn't these be documented somewhere?
>
> Hrm, that's a good point. So far we've been mostly implementing the PAPR
> spec so the documentation exists.
>
> Before that patch we only had one "special" hcall not in PAPR, which we
> use for the RTAS firmware calls (this part of the FW normally lives
> inside the guest on real pHyp and communicates with the hypervisor using
> private hcalls, on qemu, we just turn all the RTAS calls to qemu via a
> single H_RTAS multiplexer). We haven't documented it.
>
> Now I'm adding another one, so yes, it's looking like a trend :-) I'll
> look into it, at this stage with only those two, adding some comments in
> the header might be plenty enough.

Documentation/virtual/kvm/ppc-pv.txt is a nice central place for these. 
It would be even better if you could add them to the spec.
Benjamin Herrenschmidt - May 21, 2012, 11:48 a.m.
On Mon, 2012-05-21 at 13:07 +0300, Avi Kivity wrote:
> > Now I'm adding another one, so yes, it's looking like a trend :-) I'll
> > look into it, at this stage with only those two, adding some comments in
> > the header might be plenty enough.
> 
> Documentation/virtual/kvm/ppc-pv.txt is a nice central place for these. 
> It would be even better if you could add them to the spec.

They don't quite fit with the other PV calls in there which use a
different HV calling mechanism altogether, but I can certainly add a
specific section.

As to adding things to PAPR, let's assume for now that this isn't
possible :-) Besides, those two hypercalls are pretty specific to the
way things are implemented in qemu and are in both cases more or less
private mechanisms used to communicate between qemu and the SLOF
firmware we run in inside qemu, they aren't really used by random guest
SW and aren't meant to be.

It's a bit like if you had private calls between seabios and qemu...

Cheers,
Ben.



--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Avi Kivity - May 21, 2012, 1:55 p.m.
On 05/21/2012 02:48 PM, Benjamin Herrenschmidt wrote:
> On Mon, 2012-05-21 at 13:07 +0300, Avi Kivity wrote:
> > > Now I'm adding another one, so yes, it's looking like a trend :-) I'll
> > > look into it, at this stage with only those two, adding some comments in
> > > the header might be plenty enough.
> > 
> > Documentation/virtual/kvm/ppc-pv.txt is a nice central place for these. 
> > It would be even better if you could add them to the spec.
>
> They don't quite fit with the other PV calls in there which use a
> different HV calling mechanism alltogether, but I can certainly add a
> specific section.
>
> As to adding things to PAPR, let's assume for now that this isn't
> possible :-) Besides, those two hypercalls are pretty specific to the
> way things are implemented in qemu and are in both case more or less
> private mechanisms used to communicate between qemu and the SLOF
> firmware we run in inside qemu, they aren't really used by random guest
> SW and aren't meant to be.

Okay.  But let's have a spec, even a kvm-private one, and then an
implementation of that spec, instead of an implementation and some
documentation added as an afterthought (or not).

> It's a bit like if you had private calls between seabios and qemu...

We document those too.
Benjamin Herrenschmidt - May 21, 2012, 9:59 p.m.
On Mon, 2012-05-21 at 16:55 +0300, Avi Kivity wrote:
> 
> Okay.  But let's have a spec, even a kvm-private one, and then an
> implementation of that spec, instead of an implementation and some
> documentation added as an afterthought (or not).
> 
> > It's a bit like if you had private calls between seabios and qemu...
> 
> We document those too.

Well, we have the implementation so in any case the implementation
-will- be an afterthought :-) Anyways, I'll look into adding that to the
patch.

Cheers,
Ben.


--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Benjamin Herrenschmidt - May 25, 2012, 3:12 a.m.
On Mon, 2012-05-21 at 16:55 +0300, Avi Kivity wrote:
> 
> > As to adding things to PAPR, let's assume for now that this isn't
> > possible :-) Besides, those two hypercalls are pretty specific to
> the
> > way things are implemented in qemu and are in both case more or less
> > private mechanisms used to communicate between qemu and the SLOF
> > firmware we run in inside qemu, they aren't really used by random
> guest
> > SW and aren't meant to be.
> 
> Okay.  But let's have a spec, even a kvm-private one, and then an
> implementation of that spec, instead of an implementation and some
> documentation added as an afterthought (or not).
> 
> > It's a bit like if you had private calls between seabios and qemu...
> 
> We document those too.

BTW. This is a qemu patch, and that hypercall isn't KVM related at all,
ie, it's implemented in qemu and is used with or without KVM, so
documenting it in the kernel tree makes little sense. Same goes with
H_RTAS.

I'll add a doc to qemu in my next spin of it.

Cheers,
Ben.


--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Avi Kivity - May 28, 2012, 10:40 a.m.
On 05/25/2012 06:12 AM, Benjamin Herrenschmidt wrote:
> 
> BTW. This is a qemu patch, and that hypercall isn't KVM related at all,
> ie, it's implemented in qemu and is used with or without KVM, so
> documenting it in the kernel tree makes little sense. Same goes with
> H_RTAS.
> 
> I'll add a doc to qemu in my next spin of it.
> 

Depends.  How do you detect it exists?  Are you detecting kvm, or qemu,
or the hypercall itself?

I'd hate us to find ourselves in a maze of disconnected documentation
with no clear guidelines on when a feature is available and when it is not.
Alexander Graf - May 30, 2012, 8:25 a.m.
On 28.05.2012, at 12:40, Avi Kivity wrote:

> On 05/25/2012 06:12 AM, Benjamin Herrenschmidt wrote:
>> 
>> BTW. This is a qemu patch, and that hypercall isn't KVM related at all,
>> ie, it's implemented in qemu and is used with or without KVM, so
>> documenting it in the kernel tree makes little sense. Same goes with
>> H_RTAS.
>> 
>> I'll add a doc to qemu in my next spin of it.
>> 
> 
> Depends.  How do you detect it exists?  Are you detecting kvm, or qemu,
> or the hypercall itself?

The hypercall itself. SLOF is the only user. QEMU provides SLOF. SLOF calls the hypercall. If the hypercall returns "I don't exist", it doesn't exist. :)

> I'd hate us to find ourselves in a maze of disconnected documentation
> with no clear guidelines on when a feature is available and when it is not.

Yeah, but semantically these hypercalls are on the same layer as fw_cfg. So they clearly belong to QEMU. In fact, they're also used when running with emulation.


Alex

--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Benjamin Herrenschmidt - May 31, 2012, 3:22 a.m.
On Mon, 2012-05-28 at 13:40 +0300, Avi Kivity wrote:
> Depends.  How do you detect it exists?  Are you detecting kvm, or qemu,
> or the hypercall itself?
> 
> I'd hate us to find ourselves in a maze of disconnected documentation
> with no clear guidelines on when a feature is available and when it is not.

At the moment SLOF just "uses it" when using the frame buffer. We could
advertise its presence via the device-tree, there's already stuff there
to expose what hypercalls or set of hypercalls are implemented for PAPR,
we could add qemu specific extensions.

Cheers,
Ben.


--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Patch

diff --git a/hw/spapr.h b/hw/spapr.h
index 7c497aa..0343f33 100644
--- a/hw/spapr.h
+++ b/hw/spapr.h
@@ -264,7 +264,8 @@  typedef struct sPAPREnvironment {
  */
 #define KVMPPC_HCALL_BASE       0xf000
 #define KVMPPC_H_RTAS           (KVMPPC_HCALL_BASE + 0x0)
-#define KVMPPC_HCALL_MAX        KVMPPC_H_RTAS
+#define KVMPPC_H_LOGICAL_MEMOP  (KVMPPC_HCALL_BASE + 0x1)
+#define KVMPPC_HCALL_MAX        KVMPPC_H_LOGICAL_MEMOP
 
 extern sPAPREnvironment *spapr;
 
diff --git a/hw/spapr_hcall.c b/hw/spapr_hcall.c
index 94bb504..c5c26dc 100644
--- a/hw/spapr_hcall.c
+++ b/hw/spapr_hcall.c
@@ -608,6 +608,54 @@  static target_ulong h_logical_store(CPUPPCState *env, sPAPREnvironment *spapr,
     return H_PARAMETER;
 }
 
+static target_ulong h_logical_memop(CPUPPCState *env, sPAPREnvironment *spapr,
+				    target_ulong opcode, target_ulong *args)
+{
+    target_ulong dst   = args[0]; /* Destination address */
+    target_ulong src   = args[1]; /* Source address */
+    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
+    target_ulong count = args[3]; /* Element count */
+    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
+    uint64_t tmp;
+    unsigned int mask = (1 << esize) - 1;
+    int step = 1 << esize;
+
+    if (count > 0x80000000)
+	return H_PARAMETER;
+
+    if ((dst & mask) || (src & mask))
+	return H_PARAMETER;
+
+    if (dst >= src && dst < (src + (count << esize))) {
+	    dst = dst + ((count - 1) << esize);
+	    src = src + ((count - 1) << esize);
+	    step = -step;
+    }
+
+    while(count--) {
+        switch (esize) {
+        case 0: tmp = ldub_phys(src); break;
+        case 1: tmp = lduw_phys(src); break;
+        case 2: tmp = ldl_phys(src);  break;
+        case 3: tmp = ldq_phys(src);  break;
+        default:
+        return H_PARAMETER;
+	}
+        if (op)
+		tmp = ~tmp;
+        switch (esize) {
+        case 0: stb_phys(dst, tmp); break;
+        case 1: stw_phys(dst, tmp); break;
+        case 2: stl_phys(dst, tmp); break;
+        case 3: stq_phys(dst, tmp); break;
+	}
+	dst = dst + step;
+	src = src + step;
+    }
+
+    return H_SUCCESS;
+}
+
 static target_ulong h_logical_icbi(CPUPPCState *env, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
 {
@@ -700,6 +748,7 @@  static void hypercall_register_types(void)
     spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
     spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
     spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
+    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);
 
     /* qemu/KVM-PPC specific hcalls */
     spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);