diff mbox

[Qemu] Change spice-server protocol for GL texture passing

Message ID 1468590577-30914-2-git-send-email-fziglio@redhat.com
State New
Headers show

Commit Message

Frediano Ziglio July 15, 2016, 1:49 p.m. UTC
---
 ui/spice-core.c    |  5 -----
 ui/spice-display.c | 29 ++++++++---------------------
 2 files changed, 8 insertions(+), 26 deletions(-)

Comments

Frediano Ziglio July 15, 2016, 1:56 p.m. UTC | #1
Forgot to add RFC to the subject

Frediano

> 
> ---
>  ui/spice-core.c    |  5 -----
>  ui/spice-display.c | 29 ++++++++---------------------
>  2 files changed, 8 insertions(+), 26 deletions(-)
> 
> diff --git a/ui/spice-core.c b/ui/spice-core.c
> index da05054..f7647f7 100644
> --- a/ui/spice-core.c
> +++ b/ui/spice-core.c
> @@ -828,11 +828,6 @@ void qemu_spice_init(void)
>  
>  #ifdef HAVE_SPICE_GL
>      if (qemu_opt_get_bool(opts, "gl", 0)) {
> -        if ((port != 0) || (tls_port != 0)) {
> -            error_report("SPICE GL support is local-only for now and "
> -                         "incompatible with -spice port/tls-port");
> -            exit(1);
> -        }
>          if (egl_rendernode_init() != 0) {
>              error_report("Failed to initialize EGL render node for SPICE
>              GL");
>              exit(1);
> diff --git a/ui/spice-display.c b/ui/spice-display.c
> index 2a77a54..72137bd 100644
> --- a/ui/spice-display.c
> +++ b/ui/spice-display.c
> @@ -852,6 +852,10 @@ static void qemu_spice_gl_block_timer(void *opaque)
>  static QEMUGLContext qemu_spice_gl_create_context(DisplayChangeListener
>  *dcl,
>                                                    QEMUGLParams *params)
>  {
> +    SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
> +
> +    spice_qxl_gl_init(&ssd->qxl, qemu_egl_display, qemu_egl_rn_ctx);
> +
>      eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
>                     qemu_egl_rn_ctx);
>      return qemu_egl_create_context(dcl, params);
> @@ -864,28 +868,11 @@ static void qemu_spice_gl_scanout(DisplayChangeListener
> *dcl,
>                                    uint32_t w, uint32_t h)
>  {
>      SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
> -    EGLint stride = 0, fourcc = 0;
> -    int fd = -1;
> -
> -    if (tex_id) {
> -        fd = egl_get_fd_for_texture(tex_id, &stride, &fourcc);
> -        if (fd < 0) {
> -            fprintf(stderr, "%s: failed to get fd for texture\n", __func__);
> -            return;
> -        }
> -        dprint(1, "%s: %dx%d (stride %d, fourcc 0x%x)\n", __func__,
> -               w, h, stride, fourcc);
> -    } else {
> -        dprint(1, "%s: no texture (no framebuffer)\n", __func__);
> -    }
> -
> -    assert(!tex_id || fd >= 0);
>  
> -    /* note: spice server will close the fd */
> -    spice_qxl_gl_scanout(&ssd->qxl, fd,
> -                         surface_width(ssd->ds),
> -                         surface_height(ssd->ds),
> -                         stride, fourcc, y_0_top);
> +    spice_qxl_gl_scanout_texture(&ssd->qxl, tex_id,
> +                                 surface_width(ssd->ds),
> +                                 surface_height(ssd->ds),
> +                                 y_0_top);
>  
>      qemu_spice_gl_monitor_config(ssd, x, y, w, h);
>  }
Marc-Andre Lureau July 18, 2016, 4:41 p.m. UTC | #2
Hi

----- Original Message -----
> Forgot to add RFC to the subject
> 

What's the rationale? If you share the texture id, you must share the GL context too, right? Why not use a lower-level dmabuf fd that can be imported by the server GL context (which is also what the protocol requires anyway)?

> 
> > 
> > ---
> >  ui/spice-core.c    |  5 -----
> >  ui/spice-display.c | 29 ++++++++---------------------
> >  2 files changed, 8 insertions(+), 26 deletions(-)
> > 
> > diff --git a/ui/spice-core.c b/ui/spice-core.c
> > index da05054..f7647f7 100644
> > --- a/ui/spice-core.c
> > +++ b/ui/spice-core.c
> > @@ -828,11 +828,6 @@ void qemu_spice_init(void)
> >  
> >  #ifdef HAVE_SPICE_GL
> >      if (qemu_opt_get_bool(opts, "gl", 0)) {
> > -        if ((port != 0) || (tls_port != 0)) {
> > -            error_report("SPICE GL support is local-only for now and "
> > -                         "incompatible with -spice port/tls-port");
> > -            exit(1);
> > -        }
> >          if (egl_rendernode_init() != 0) {
> >              error_report("Failed to initialize EGL render node for SPICE
> >              GL");
> >              exit(1);
> > diff --git a/ui/spice-display.c b/ui/spice-display.c
> > index 2a77a54..72137bd 100644
> > --- a/ui/spice-display.c
> > +++ b/ui/spice-display.c
> > @@ -852,6 +852,10 @@ static void qemu_spice_gl_block_timer(void *opaque)
> >  static QEMUGLContext qemu_spice_gl_create_context(DisplayChangeListener
> >  *dcl,
> >                                                    QEMUGLParams *params)
> >  {
> > +    SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
> > +
> > +    spice_qxl_gl_init(&ssd->qxl, qemu_egl_display, qemu_egl_rn_ctx);
> > +
> >      eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
> >                     qemu_egl_rn_ctx);
> >      return qemu_egl_create_context(dcl, params);
> > @@ -864,28 +868,11 @@ static void
> > qemu_spice_gl_scanout(DisplayChangeListener
> > *dcl,
> >                                    uint32_t w, uint32_t h)
> >  {
> >      SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
> > -    EGLint stride = 0, fourcc = 0;
> > -    int fd = -1;
> > -
> > -    if (tex_id) {
> > -        fd = egl_get_fd_for_texture(tex_id, &stride, &fourcc);
> > -        if (fd < 0) {
> > -            fprintf(stderr, "%s: failed to get fd for texture\n",
> > __func__);
> > -            return;
> > -        }
> > -        dprint(1, "%s: %dx%d (stride %d, fourcc 0x%x)\n", __func__,
> > -               w, h, stride, fourcc);
> > -    } else {
> > -        dprint(1, "%s: no texture (no framebuffer)\n", __func__);
> > -    }
> > -
> > -    assert(!tex_id || fd >= 0);
> >  
> > -    /* note: spice server will close the fd */
> > -    spice_qxl_gl_scanout(&ssd->qxl, fd,
> > -                         surface_width(ssd->ds),
> > -                         surface_height(ssd->ds),
> > -                         stride, fourcc, y_0_top);
> > +    spice_qxl_gl_scanout_texture(&ssd->qxl, tex_id,
> > +                                 surface_width(ssd->ds),
> > +                                 surface_height(ssd->ds),
> > +                                 y_0_top);
> >  
> >      qemu_spice_gl_monitor_config(ssd, x, y, w, h);
> >  }
> _______________________________________________
> Spice-devel mailing list
> Spice-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/spice-devel
>
Frediano Ziglio July 19, 2016, 9:45 a.m. UTC | #3
> 
> Hi
> 
> ----- Original Message -----
> > Forgot to add RFC to the subject
> > 
> 
> What's the rationale? if you share the texture id, you must share the GL
> context too, right? Why not use a lower level dmabuf fd that can be imported
> by the server gl context (which is also what the protocol require anyway)?
> 

Yes, the display and context are shared using spice_qxl_gl_init.
Importing again into a gl context would mean that you have to export the
DRM prime and import it again in a separate (not shared) context.
It's also doable, it just adds 2 system calls and wrapping/unwrapping.
It would be good to pass the EGLDisplay then, so spice-server doesn't have
to initialize again, possibly using another physical card.

We have 4 cases:
- client not connected;
- local client;
- remote client, software encoding;
- remote client, hardware encoding.

Client not connected
Passing the texture is a no-operation; passing a DRM prime requires
extracting the handle and closing it every frame.

Local client
In this case there is no overhead; the DRM prime is always extracted and
passed to the client.

Remote client, software encoding
Due to different problems (DRM prime not mmap-able or data not portably
extractable) we'll need to import the DRM prime into a different EGL
context (not shared with the original one), create another texture,
extract the data and free the texture/DRM prime.

Remote client, hardware encoding
It's not clear whether it's better to pass the DRM prime or the texture;
some APIs pass the texture. I got confirmation that gst_dmabuf_allocator_new
could try to use mmap in some cases, so we should check this somehow
to make sure it does not.

Taking into account that DRM primes come with "free" reference counting,
creating the DRM prime from a texture basically increases a counter which is
used by our implementation to make sure the texture still exists, so
passing the texture instead of the DRM prime possibly just saves a system
call in the normal case. I don't know what happens to the DRM object handle
when the texture is destroyed (in Qemu) with glDeleteTextures — whether
bindings keep the texture "alive" or are all reset.


Could be that keeping qemu_spice_gl_scanout and spice_qxl_gl_scanout_texture
as current implementation and adding a spice_qxl_gl_init/spice_qxl_gl_setup
passing just QXLInstance and EGLDisplay is a better solution.

Does it sound reasonable?

Frediano

> > 
> > > 
> > > ---
> > >  ui/spice-core.c    |  5 -----
> > >  ui/spice-display.c | 29 ++++++++---------------------
> > >  2 files changed, 8 insertions(+), 26 deletions(-)
> > > 
> > > diff --git a/ui/spice-core.c b/ui/spice-core.c
> > > index da05054..f7647f7 100644
> > > --- a/ui/spice-core.c
> > > +++ b/ui/spice-core.c
> > > @@ -828,11 +828,6 @@ void qemu_spice_init(void)
> > >  
> > >  #ifdef HAVE_SPICE_GL
> > >      if (qemu_opt_get_bool(opts, "gl", 0)) {
> > > -        if ((port != 0) || (tls_port != 0)) {
> > > -            error_report("SPICE GL support is local-only for now and "
> > > -                         "incompatible with -spice port/tls-port");
> > > -            exit(1);
> > > -        }
> > >          if (egl_rendernode_init() != 0) {
> > >              error_report("Failed to initialize EGL render node for SPICE
> > >              GL");
> > >              exit(1);
> > > diff --git a/ui/spice-display.c b/ui/spice-display.c
> > > index 2a77a54..72137bd 100644
> > > --- a/ui/spice-display.c
> > > +++ b/ui/spice-display.c
> > > @@ -852,6 +852,10 @@ static void qemu_spice_gl_block_timer(void *opaque)
> > >  static QEMUGLContext qemu_spice_gl_create_context(DisplayChangeListener
> > >  *dcl,
> > >                                                    QEMUGLParams *params)
> > >  {
> > > +    SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay,
> > > dcl);
> > > +
> > > +    spice_qxl_gl_init(&ssd->qxl, qemu_egl_display, qemu_egl_rn_ctx);
> > > +
> > >      eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
> > >                     qemu_egl_rn_ctx);
> > >      return qemu_egl_create_context(dcl, params);
> > > @@ -864,28 +868,11 @@ static void
> > > qemu_spice_gl_scanout(DisplayChangeListener
> > > *dcl,
> > >                                    uint32_t w, uint32_t h)
> > >  {
> > >      SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay,
> > >      dcl);
> > > -    EGLint stride = 0, fourcc = 0;
> > > -    int fd = -1;
> > > -
> > > -    if (tex_id) {
> > > -        fd = egl_get_fd_for_texture(tex_id, &stride, &fourcc);
> > > -        if (fd < 0) {
> > > -            fprintf(stderr, "%s: failed to get fd for texture\n",
> > > __func__);
> > > -            return;
> > > -        }
> > > -        dprint(1, "%s: %dx%d (stride %d, fourcc 0x%x)\n", __func__,
> > > -               w, h, stride, fourcc);
> > > -    } else {
> > > -        dprint(1, "%s: no texture (no framebuffer)\n", __func__);
> > > -    }
> > > -
> > > -    assert(!tex_id || fd >= 0);
> > >  
> > > -    /* note: spice server will close the fd */
> > > -    spice_qxl_gl_scanout(&ssd->qxl, fd,
> > > -                         surface_width(ssd->ds),
> > > -                         surface_height(ssd->ds),
> > > -                         stride, fourcc, y_0_top);
> > > +    spice_qxl_gl_scanout_texture(&ssd->qxl, tex_id,
> > > +                                 surface_width(ssd->ds),
> > > +                                 surface_height(ssd->ds),
> > > +                                 y_0_top);
> > >  
> > >      qemu_spice_gl_monitor_config(ssd, x, y, w, h);
> > >  }
Marc-Andre Lureau July 19, 2016, 12:56 p.m. UTC | #4
Hi

----- Original Message -----
> > 
> > Hi
> > 
> > ----- Original Message -----
> > > Forgot to add RFC to the subject
> > > 
> > 
> > What's the rationale? if you share the texture id, you must share the GL
> > context too, right? Why not use a lower level dmabuf fd that can be
> > imported
> > by the server gl context (which is also what the protocol require anyway)?
> > 
> 
> Yes, the display and context are shared using spice_qxl_gl_init.
> Importing again into a gl context would mean that you have to export the
> DRM prime and import again in a separate (not shared) context.
> It's also doable, just add 2 system call and wrapping/unwrapping.
> Would be good to pass the EGLDisplay then so spice-server don't have to
> initialize again possibly using another physical card.
> 
> We have 4 cases:
> - client not connected;
> - local client;
> - remote client, software encoding;
> - remote client, hardware encoding.
> 

Before optimizing those syscalls and changing the API etc., I would like to know if they are expensive (my feeling is that they are not)

Also, it is possible virglrenderer could be optimized to avoid exporting the prime fd for each scanout, if the backing image is always the same.

Sharing a GL context brings new issues. If spice server could use its own context, we have some context isolation (gl is still bad at MT iirc).

> Client not connected
> Passing the texture is a no-operation, passing DRM prime require to
> extract the handle and close every frame.
> 
> Local client
> In this case there is no overhear, DRM prime is always extracted and
> passed to the client
> 
> Remote client, software encoding
> Due to different problems (DRM prime not mmap-able or data not portably
> extractable) we'll need to import the DRM prime into a different EGL
> context (not shared with the original one), create another texture,
> extract data and free all texture/DRM prime.

I don't think we have strong reasons to support software encoding; video encoding is really expensive, and that mmap/copy is not going to be marginal — even less so these 2 syscalls.

> 
> Remote client, hardware encoding
> It's not clear if it's better to pass the DRM prime or the texture,
> some API pass the texture. I got confirmation that gst_dmabuf_allocator_new
> could try to use mmap in some cases so we should check this somehow
> to make sure it does not.
> 

We definitely don't want any mmap/copy to take place for hw encoding.

> Taking into account that DRM prime came with "free" reference counting
> creating the DRM prime from texture basically increase a counter which is
> used by our implementation to make sure texture is still existing so
> possibly passing texture instead of DRM prime just save a system call
> in the normal case. I don't know what happens to the DRM object handle when
> the texture is destroyed (in Qemu) with glDeleteTextures if bindings keep
> texture "alive" or are all reset.
> 
> 
> Could be that keeping qemu_spice_gl_scanout and spice_qxl_gl_scanout_texture
> as current implementation and adding a spice_qxl_gl_init/spice_qxl_gl_setup
> passing just QXLInstance and EGLDisplay is a better solution.
> 
> Does is sound reasonable?

I wouldn't rush with API changes before we have a better idea how hw encoding can be done without mmap and whether it's really worth it (I would rather see spice spawning a separate gl context and process for the encoding than sharing it)
Frediano Ziglio July 19, 2016, 1:41 p.m. UTC | #5
> Hi
> 
> ----- Original Message -----
> > > 
> > > Hi
> > > 
> > > ----- Original Message -----
> > > > Forgot to add RFC to the subject
> > > > 
> > > 
> > > What's the rationale? if you share the texture id, you must share the GL
> > > context too, right? Why not use a lower level dmabuf fd that can be
> > > imported
> > > by the server gl context (which is also what the protocol require
> > > anyway)?
> > > 
> > 
> > Yes, the display and context are shared using spice_qxl_gl_init.
> > Importing again into a gl context would mean that you have to export the
> > DRM prime and import again in a separate (not shared) context.
> > It's also doable, just add 2 system call and wrapping/unwrapping.
> > Would be good to pass the EGLDisplay then so spice-server don't have to
> > initialize again possibly using another physical card.
> > 
> > We have 4 cases:
> > - client not connected;
> > - local client;
> > - remote client, software encoding;
> > - remote client, hardware encoding.
> > 
> 
> Before optimizing those syscalls and changing API etc, I would like to know
> if they are expensive (it's not my feeling)
> 
> Also, it is possible virglrenderer could be optimized to avoid exporting the
> prime fd for each scanout, if the backing image is always the same.
> 
> Sharing a GL context brings new issues. If spice server could use its own
> context, we have some context isolation (gl is still bad at MT iirc).
> 
> > Client not connected
> > Passing the texture is a no-operation, passing DRM prime require to
> > extract the handle and close every frame.
> > 
> > Local client
> > In this case there is no overhear, DRM prime is always extracted and
> > passed to the client
> > 
> > Remote client, software encoding
> > Due to different problems (DRM prime not mmap-able or data not portably
> > extractable) we'll need to import the DRM prime into a different EGL
> > context (not shared with the original one), create another texture,
> > extract data and free all texture/DRM prime.
> 
> I don't think we have strong reasons to support software encoding, video
> encoding is really expensive, and that mmap/copy is not going to be
> marginal, so even less these 2 syscalls.
> 

Using HW encoding is not as easy as it seems:
- you have to have client supporting server HW encoders;
- you have to install additional software often closed source, accepting
  patents;
- you have to have right permission on the system.
What do you do if these conditions are not met? Not allow
connections? Show a blank screen?
With a good (local) connection I can easily play using software MJPEG, so why
should we avoid such configurations?

> > 
> > Remote client, hardware encoding
> > It's not clear if it's better to pass the DRM prime or the texture,
> > some API pass the texture. I got confirmation that gst_dmabuf_allocator_new
> > could try to use mmap in some cases so we should check this somehow
> > to make sure it does not.
> > 
> 
> We definitely don't want any mmap/copy to take place for hw encoding.
> 

Sure, but it's hard to avoid fallbacks with all the different setups.

> > Taking into account that DRM prime came with "free" reference counting
> > creating the DRM prime from texture basically increase a counter which is
> > used by our implementation to make sure texture is still existing so
> > possibly passing texture instead of DRM prime just save a system call
> > in the normal case. I don't know what happens to the DRM object handle when
> > the texture is destroyed (in Qemu) with glDeleteTextures if bindings keep
> > texture "alive" or are all reset.
> > 
> > 
> > Could be that keeping qemu_spice_gl_scanout and
> > spice_qxl_gl_scanout_texture
> > as current implementation and adding a spice_qxl_gl_init/spice_qxl_gl_setup
> > passing just QXLInstance and EGLDisplay is a better solution.
> > 
> > Does is sound reasonable?
> 
> I wouldn't rush with API changes before we have a better idea how hw encoding
> can be done without mmap and wether its really worth it (I would rather see
> spice spawning a seperate gl context and process for the encoding than
> sharing it)
> 

I'm not rushing, this was the idea of the RFC.
Spawning a process only helps to solve library licensing issues.
My list of patches for spice-server used the passed context just to create
a new context which is shared with the provided one; as I said, using a
different gl context and importing the DRM prime is a good option.

Passing the EGLDisplay from Qemu helps solving:
- double EGL initialization;
- multiple cards issues;
- -chroot/-runas Qemu options, where you lose access and you are not
  able to initialize EGL/VAAPI again.
I can see that Qemu searching for the card is different from VAAPI.
In case of multiple cards and Qemu run as a daemon (not having Xwayland/X)
you can end up using two physical cards.

I'll try VAAPI DRM prime passing, I hope this week.

Frediano
Christophe Fergeau July 20, 2016, 3:13 p.m. UTC | #6
On Tue, Jul 19, 2016 at 09:41:22AM -0400, Frediano Ziglio wrote:
> > I don't think we have strong reasons to support software encoding, video
> > encoding is really expensive, and that mmap/copy is not going to be
> > marginal, so even less these 2 syscalls.
> > 
> 
> Using HW encoding is not easy at it seems:
> - you have to have client supporting server HW encoders;
> - you have to install additional software often closed source, accepting
>   patents;
> - you have to have right permission on the system.
> What are you doing if these option are not respected? Do not allow
> connections? Showing blank screen?
> With a good (local) connection I can easily play using software MJPEG, why
> we should avoid such configurations?

What we should be aiming/optimizing for is the hardware-accelerated
case. We will need a fallback when this is not usable, but the various
copies/encoding/... are going to be very expensive by themselves. Are
these changes (passing texture rather than dmabuf) making a significant
difference with software encoding?

Christophe
Frediano Ziglio July 21, 2016, 11:43 a.m. UTC | #7
> 
> On Tue, Jul 19, 2016 at 09:41:22AM -0400, Frediano Ziglio wrote:
> > > I don't think we have strong reasons to support software encoding, video
> > > encoding is really expensive, and that mmap/copy is not going to be
> > > marginal, so even less these 2 syscalls.
> > > 
> > 
> > Using HW encoding is not easy at it seems:
> > - you have to have client supporting server HW encoders;
> > - you have to install additional software often closed source, accepting
> >   patents;
> > - you have to have right permission on the system.
> > What are you doing if these option are not respected? Do not allow
> > connections? Showing blank screen?
> > With a good (local) connection I can easily play using software MJPEG, why
> > we should avoid such configurations?
> 
> What we should be aiming/optimizing for is the hardware-accelerated
> case. We will need a fallback when this is not usable, but the various
> copies/encoding/... are going to be very expensive by themselves. Are
> these changes (passing texture rather than dmabuf) making a significant
> difference with software encoding?
> 
> Christophe
> 

Got some experimental results passing DRM primes to gstreamer
(https://www.youtube.com/watch?v=NFDvMHfXUHA).
With VAAPI it's working; the full frame processing time decreased by 50%.
No, they are not expensive, and we could just (in case of fallback)
import into a new GL context to have them extracted correctly.
I'm currently trying to make it work again with software encoders (no VAAPI,
having to extract raw data). It's working, as with kernel 4.6 i915
allows mmap, but as the textures are in a different format I get quite some
garbage (they should be extracted with GL, which knows about these problems).

Frediano
diff mbox

Patch

diff --git a/ui/spice-core.c b/ui/spice-core.c
index da05054..f7647f7 100644
--- a/ui/spice-core.c
+++ b/ui/spice-core.c
@@ -828,11 +828,6 @@  void qemu_spice_init(void)
 
 #ifdef HAVE_SPICE_GL
     if (qemu_opt_get_bool(opts, "gl", 0)) {
-        if ((port != 0) || (tls_port != 0)) {
-            error_report("SPICE GL support is local-only for now and "
-                         "incompatible with -spice port/tls-port");
-            exit(1);
-        }
         if (egl_rendernode_init() != 0) {
             error_report("Failed to initialize EGL render node for SPICE GL");
             exit(1);
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 2a77a54..72137bd 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -852,6 +852,10 @@  static void qemu_spice_gl_block_timer(void *opaque)
 static QEMUGLContext qemu_spice_gl_create_context(DisplayChangeListener *dcl,
                                                   QEMUGLParams *params)
 {
+    SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
+
+    spice_qxl_gl_init(&ssd->qxl, qemu_egl_display, qemu_egl_rn_ctx);
+
     eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
                    qemu_egl_rn_ctx);
     return qemu_egl_create_context(dcl, params);
@@ -864,28 +868,11 @@  static void qemu_spice_gl_scanout(DisplayChangeListener *dcl,
                                   uint32_t w, uint32_t h)
 {
     SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
-    EGLint stride = 0, fourcc = 0;
-    int fd = -1;
-
-    if (tex_id) {
-        fd = egl_get_fd_for_texture(tex_id, &stride, &fourcc);
-        if (fd < 0) {
-            fprintf(stderr, "%s: failed to get fd for texture\n", __func__);
-            return;
-        }
-        dprint(1, "%s: %dx%d (stride %d, fourcc 0x%x)\n", __func__,
-               w, h, stride, fourcc);
-    } else {
-        dprint(1, "%s: no texture (no framebuffer)\n", __func__);
-    }
-
-    assert(!tex_id || fd >= 0);
 
-    /* note: spice server will close the fd */
-    spice_qxl_gl_scanout(&ssd->qxl, fd,
-                         surface_width(ssd->ds),
-                         surface_height(ssd->ds),
-                         stride, fourcc, y_0_top);
+    spice_qxl_gl_scanout_texture(&ssd->qxl, tex_id,
+                                 surface_width(ssd->ds),
+                                 surface_height(ssd->ds),
+                                 y_0_top);
 
     qemu_spice_gl_monitor_config(ssd, x, y, w, h);
 }