
[2/5] socket: Add a reconnect option.

Message ID 1265031265-14717-3-git-send-email-ian.molton@collabora.co.uk
State New

Commit Message

Ian Molton Feb. 1, 2010, 1:34 p.m. UTC
Add a reconnect option that allows sockets to reconnect (after a
specified delay) to the specified server. This makes the virtio-rng driver
useful in production environments where the EGD server may need to be restarted.

Signed-off-by: Ian Molton <ian.molton@collabora.co.uk>
---
 qemu-char.c   |  159 +++++++++++++++++++++++++++++++++++++++++++--------------
 qemu-char.h   |    2 +
 qemu-config.c |    3 +
 vl.c          |    4 ++
 4 files changed, 129 insertions(+), 39 deletions(-)
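
For illustration only, a client chardev feeding the EGD-backed virtio-rng device
from this series could then be given a reconnect delay roughly as follows (the
id, host and port values are invented for this example; reconnect takes the
retry delay in seconds and only applies to non-listening, non-telnet sockets):

    -chardev socket,id=egd0,host=localhost,port=1024,reconnect=5

If the EGD server goes away, the connection is retried every 5 seconds instead
of leaving the chardev closed for the remaining lifetime of the VM.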

Comments

Anthony Liguori Feb. 1, 2010, 3:25 p.m. UTC | #1
On 02/01/2010 07:34 AM, Ian Molton wrote:
> 	Add a reconnect option that allows sockets to reconnect (after a
> specified delay) to the specified server. This makes the virtio-rng driver
> useful in production environments where the EGD server may need to be restarted.
>
> Signed-off-by: Ian Molton <ian.molton@collabora.co.uk>
>    

I went back and looked at the last series and found my feedback.  I had 
suggested that instead of automatically reconnecting, a mechanism should 
be added for a user to initiate a reconnect.

Additionally, we should emit events upon disconnect through QMP (now 
that we have that functionality).
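
For illustration, such a notification might be a QMP event along the lines of
the existing VNC ones; the event name and payload below are hypothetical, not
an existing interface:

    { "event": "CHARDEV_DISCONNECT", "data": { "label": "egd0" } }

paired with a monitor/QMP command that lets a management layer ask the chardev
to reconnect once the backend is reachable again.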

The main reason I dislike automatic reconnecting is that there is no 
correct way to handle the period of time while the VM is disconnected.

A user might want to pause the VM, trigger a live migration to a 
non-broken system, checkpoint the VM, etc.

Auto reconnecting is implementing a policy to handle the failure within 
QEMU which is not universally the correct choice.  This isn't so bad 
except for the fact that you aren't providing the mechanisms for users 
to implement other policies which means they're stuck with this 
particular policy.

Regards,

Anthony Liguori

Luiz Capitulino Feb. 1, 2010, 4:12 p.m. UTC | #2
On Mon, 01 Feb 2010 09:25:27 -0600
Anthony Liguori <anthony@codemonkey.ws> wrote:

> On 02/01/2010 07:34 AM, Ian Molton wrote:
> > 	Add a reconnect option that allows sockets to reconnect (after a
> > specified delay) to the specified server. This makes the virtio-rng driver
> > useful in production environments where the EGD server may need to be restarted.
> >
> > Signed-off-by: Ian Molton <ian.molton@collabora.co.uk>
> >    
> 
> I went back and looked at the last series and found my feedback.  I had 
> suggested that instead of automatically reconnecting, a mechanism should 
> be added for a user to initiate a reconnect.
> 
> Additionally, we should emit events upon disconnect through QMP (now 
> that we have that functionality).

 Should we merge all disconnect events or should we keep them
separated?

 I mean, we currently have VNC_DISCONNECT and will likely have
SPICE_DISCONNECT. Maybe we could have a SOCKET_DISCONNECT and have
a 'source' member, like:

{ "event": "SOCKET_DISCONNECT", "data": { "source": "vnc" ... } }

 I can see two drawbacks:

1. We can't do this for connects, which will make this inconsistent
2. Its "data" member is dependent on the source, which makes this
   event return a set of different info (say vnc auth types vs. auth
   types)

 So, I'd keep them separated.
Anthony Liguori Feb. 1, 2010, 4:49 p.m. UTC | #3
On 02/01/2010 10:12 AM, Luiz Capitulino wrote:
> On Mon, 01 Feb 2010 09:25:27 -0600
> Anthony Liguori <anthony@codemonkey.ws> wrote:
>
>    
>> On 02/01/2010 07:34 AM, Ian Molton wrote:
>>      
>>> 	Add a reconnect option that allows sockets to reconnect (after a
>>> specified delay) to the specified server. This makes the virtio-rng driver
>>> useful in production environments where the EGD server may need to be restarted.
>>>
>>> Signed-off-by: Ian Molton <ian.molton@collabora.co.uk>
>>>
>>>        
>> I went back and looked at the last series and found my feedback.  I had
>> suggested that instead of automatically reconnecting, a mechanism should
>> be added for a user to initiate a reconnect.
>>
>> Additionally, we should emit events upon disconnect through QMP (now
>> that we have that functionality).
>>      
>   Should we merge all disconnect events or should we keep them
> separated?
>
>   I mean, we currently have VNC_DISCONNECT and will likely have
> SPICE_DISCONNECT. Maybe we could have a SOCKET_DISCONNECT and have
> a 'source' member, like:
>    

Good question.

I'd suggest for now, keep them separate events.  We can always merge 
them into a single event later and deprecate the old events.  That will 
give us a better idea of what the data payload needs to be.

Regards,

Anthony Liguori
Ian Molton Feb. 1, 2010, 10:44 p.m. UTC | #4
Anthony Liguori wrote:

> I went back and looked at the last series and found my feedback.  I had
> suggested that instead of automatically reconnecting, a mechanism should
> be added for a user to initiate a reconnect.

This sounds useful

> Additionally, we should emit events upon disconnect through QMP (now
> that we have that functionality).

This also.

> The main reason I dislike automatic reconnecting is that there is no
> correct way to handle the period of time while the VM is disconnected.

This is fine for the EGD protocol; the guest will just run low on entropy in
the meantime. Nothing should break.

> Auto reconnecting is implementing a policy to handle the failure within
> QEMU which is not universally the correct choice.  This isn't so bad
> except for the fact that you aren't providing the mechanisms for users
> to implement other policies which means they're stuck with this
> particular policy.

Perhaps this feature could be added if needed in future? It seems a bit
ambitious to get all this 'right' with no use cases to test against.

-Ian
Anthony Liguori Feb. 1, 2010, 10:54 p.m. UTC | #5
On 02/01/2010 04:44 PM, Ian Molton wrote:
> Anthony Liguori wrote:
>
>    
>> I went back and looked at the last series and found my feedback.  I had
>> suggested that instead of automatically reconnecting, a mechanism should
>> be added for a user to initiate a reconnect.
>>      
> This sounds useful
>
>    
>> Additionally, we should emit events upon disconnect through QMP (now
>> that we have that functionality).
>>      
> This also.
>
>    
>> The main reason I dislike automatic reconnecting is that there is no
>> correct way to handle the period of time while the VM is disconnected.
>>      
> This is fine for the EGD protocol; the guest will just run low on entropy in
> the meantime. Nothing should break.
>    

Right, but you're adding a generic piece of functionality.  For 
instance, what's the result of using reconnect with virtio-console?  Is 
this something that is going to work well?

>> Auto reconnecting is implementing a policy to handle the failure within
>> QEMU which is not universally the correct choice.  This isn't so bad
>> except for the fact that you aren't providing the mechanisms for users
>> to implement other policies which means they're stuck with this
>> particular policy.
>>      
> Perhaps this feature could be added if needed in future? It seems a bit
> ambitious to get all this 'right' with no use cases to test against.
>    

I'm all for doing things incrementally, but there has to be a big picture
that the incremental bit fits into; otherwise you end up with a bunch of
random features that don't work together well.

Honestly, I'd strongly suggest splitting the reconnect logic out of the 
series when resubmitting.  I think it's just too hacky with too weak of 
a justification.  If you really want this functionality, we can discuss 
the right approach for doing it but it's gotta be done in a way that's 
not introducing a one-off case just for the random number generator.

Regards,

Anthony Liguori

> -Ian
>
Ian Molton Feb. 2, 2010, 10:23 a.m. UTC | #6
Anthony Liguori wrote:

> I'm all for doing things incrementally, but there has to be a big picture
> that the incremental bit fits into; otherwise you end up with a bunch of
> random features that don't work together well.

Well, if you just add stuff without ever changing anything that went
before, of course.

> Honestly, I'd strongly suggest splitting the reconnect logic out of the
> series when resubmitting.

IMO the RNG stuff is worthless without the reconnect logic. You can't
have a machine in a production environment that just stops getting
entropy forever when you (say) restart the EGD, perhaps during a package
update. Or when someone unplugs the entropy source temporarily or
something like that.

>  I think it's just too hacky with too weak of
> a justification.  If you really want this functionality, we can discuss
> the right approach for doing it but it's gotta be done in a way that's
> not introducing a one-off case just for the random number generator.

I don't think it's a case of 'really want' as much as 'it's completely
essential' :-)

I still think that unless there are any other use cases, there's not much
to discuss - the code is already generic to some degree - it notifies
users, and it's got a configurable delay. What else do we need? I
implemented it generically rather than stuff it into the virtio-rng
driver *because* I didn't think a dedicated version of it was the right
way to go, but without some other use cases, I can't see what good there
is in bikeshedding over this?

-Ian

Patch

diff --git a/qemu-char.c b/qemu-char.c
index 800ee6c..016afd0 100644
--- a/qemu-char.c
+++ b/qemu-char.c
@@ -1870,8 +1870,12 @@  typedef struct {
     int max_size;
     int do_telnetopt;
     int do_nodelay;
+    int reconnect;
     int is_unix;
     int msgfd;
+    QemuOpts *opts;
+    CharDriverState *chr;
+    int (*setup)(QemuOpts *opts);
 } TCPCharDriver;
 
 static void tcp_chr_accept(void *opaque);
@@ -2011,6 +2015,8 @@  static ssize_t tcp_chr_recv(CharDriverState *chr, char *buf, size_t len)
 }
 #endif
 
+static void qemu_chr_sched_reconnect(TCPCharDriver *s);
+
 static void tcp_chr_read(void *opaque)
 {
     CharDriverState *chr = opaque;
@@ -2030,10 +2036,16 @@  static void tcp_chr_read(void *opaque)
         if (s->listen_fd >= 0) {
             qemu_set_fd_handler(s->listen_fd, tcp_chr_accept, NULL, chr);
         }
-        qemu_set_fd_handler(s->fd, NULL, NULL, NULL);
+        if (!s->reconnect) {
+            qemu_set_fd_handler(s->fd, NULL, NULL, NULL);
+        }
         closesocket(s->fd);
         s->fd = -1;
-        qemu_chr_event(chr, CHR_EVENT_CLOSED);
+        if (s->reconnect) {
+            qemu_chr_sched_reconnect(s);
+        } else {
+            qemu_chr_event(chr, CHR_EVENT_CLOSED);
+        }
     } else if (size > 0) {
         if (s->do_telnetopt)
             tcp_chr_process_IAC_bytes(chr, s, buf, &size);
@@ -2133,11 +2145,92 @@  static void tcp_chr_close(CharDriverState *chr)
     qemu_chr_event(chr, CHR_EVENT_CLOSED);
 }
 
+static int qemu_chr_connect_socket(TCPCharDriver *s)
+{
+    QemuOpts *opts = s->opts;
+    int is_listen;
+    int fd;
+    int is_waitconnect;
+    int do_nodelay;
+
+    is_waitconnect = qemu_opt_get_bool(opts, "wait", 1);
+    is_listen      = qemu_opt_get_bool(opts, "server", 0);
+    do_nodelay     = !qemu_opt_get_bool(opts, "delay", 1);
+
+
+    fd = s->setup(s->opts);
+    if (fd < 0)
+        return 0;
+
+    if (!is_waitconnect)
+        socket_set_nonblock(fd);
+
+    if (is_listen) {
+        s->listen_fd = fd;
+        qemu_set_fd_handler(s->listen_fd, tcp_chr_accept, NULL, s->chr);
+        if (is_waitconnect) {
+            printf("QEMU waiting for connection on: %s\n",
+                   s->chr->filename);
+            tcp_chr_accept(s->chr);
+            socket_set_nonblock(s->listen_fd);
+        }
+    } else {
+        s->fd = fd;
+        socket_set_nodelay(fd);
+        tcp_chr_connect(s->chr);
+    }
+
+    return 1;
+}
+
+static QLIST_HEAD(reconnect_list_head, reconnect_list_entry) rcl_head;
+
+typedef struct reconnect_list_entry {
+    TCPCharDriver *s;
+    uint64_t when;
+    QLIST_ENTRY(reconnect_list_entry) entries;
+} reconnect_list_entry;
+
+static void qemu_chr_sched_reconnect(TCPCharDriver *s)
+{
+    reconnect_list_entry *new = qemu_malloc(sizeof(*new));
+    struct timeval tv;
+
+    qemu_gettimeofday(&tv);
+    new->s = s;
+    new->when = (s->reconnect + tv.tv_sec) * 1000000 + tv.tv_usec;
+    QLIST_INSERT_HEAD(&rcl_head, new, entries);
+}
+
+void qemu_chr_reconnect(void)
+{
+    struct timeval tv;
+    uint64_t now;
+    reconnect_list_entry *np;
+
+    if (!rcl_head.lh_first)
+        return;
+
+    gettimeofday(&tv, NULL);
+    now = tv.tv_sec * 1000000 + tv.tv_usec;
+
+    for (np = rcl_head.lh_first; np != NULL; np = np->entries.le_next) {
+        if (np->when <= now) {
+            if (qemu_chr_connect_socket(np->s)) {
+                qemu_chr_event(np->s->chr, CHR_EVENT_RECONNECTED);
+                QLIST_REMOVE(np, entries);
+            }
+            else {
+                np->when += np->s->reconnect * 1000000;
+            }
+        }
+    }
+}
+
 static CharDriverState *qemu_chr_open_socket(QemuOpts *opts)
 {
     CharDriverState *chr = NULL;
     TCPCharDriver *s = NULL;
-    int fd = -1;
     int is_listen;
     int is_waitconnect;
     int do_nodelay;
@@ -2145,34 +2238,40 @@  static CharDriverState *qemu_chr_open_socket(QemuOpts *opts)
     int is_telnet;
 
     is_listen      = qemu_opt_get_bool(opts, "server", 0);
+    is_unix        = qemu_opt_get(opts, "path") != NULL;
+
     is_waitconnect = qemu_opt_get_bool(opts, "wait", 1);
     is_telnet      = qemu_opt_get_bool(opts, "telnet", 0);
     do_nodelay     = !qemu_opt_get_bool(opts, "delay", 1);
-    is_unix        = qemu_opt_get(opts, "path") != NULL;
-    if (!is_listen)
+
+    if (!is_listen) {
         is_waitconnect = 0;
+    } else {
+        if (is_telnet)
+            s->do_telnetopt = 1;
+    }
+
 
-    chr = qemu_mallocz(sizeof(CharDriverState));
     s = qemu_mallocz(sizeof(TCPCharDriver));
+    chr = qemu_mallocz(sizeof(CharDriverState));
+    s->opts = opts;
+
+    if (!is_listen && !is_telnet)
+        s->reconnect = qemu_opt_get_number(opts, "reconnect", 0);
 
     if (is_unix) {
         if (is_listen) {
-            fd = unix_listen_opts(opts);
+            s->setup = unix_listen_opts;
         } else {
-            fd = unix_connect_opts(opts);
+            s->setup = unix_connect_opts;
         }
     } else {
         if (is_listen) {
-            fd = inet_listen_opts(opts, 0);
+            s->setup = inet_listen_opts;
         } else {
-            fd = inet_connect_opts(opts);
+            s->setup = inet_connect_opts;
         }
     }
-    if (fd < 0)
-        goto fail;
-
-    if (!is_waitconnect)
-        socket_set_nonblock(fd);
 
     s->connected = 0;
     s->fd = -1;
@@ -2186,19 +2285,6 @@  static CharDriverState *qemu_chr_open_socket(QemuOpts *opts)
     chr->chr_close = tcp_chr_close;
     chr->get_msgfd = tcp_get_msgfd;
 
-    if (is_listen) {
-        s->listen_fd = fd;
-        qemu_set_fd_handler(s->listen_fd, tcp_chr_accept, NULL, chr);
-        if (is_telnet)
-            s->do_telnetopt = 1;
-
-    } else {
-        s->connected = 1;
-        s->fd = fd;
-        socket_set_nodelay(fd);
-        tcp_chr_connect(chr);
-    }
-
     /* for "info chardev" monitor command */
     chr->filename = qemu_malloc(256);
     if (is_unix) {
@@ -2215,19 +2301,14 @@  static CharDriverState *qemu_chr_open_socket(QemuOpts *opts)
                  qemu_opt_get_bool(opts, "server", 0) ? ",server" : "");
     }
 
-    if (is_listen && is_waitconnect) {
-        printf("QEMU waiting for connection on: %s\n",
-               chr->filename);
-        tcp_chr_accept(chr);
-        socket_set_nonblock(s->listen_fd);
-    }
-    return chr;
+    s->chr = chr;
+
+    if(qemu_chr_connect_socket(s))
+        return chr;
 
- fail:
-    if (fd >= 0)
-        closesocket(fd);
-    qemu_free(s);
     qemu_free(chr);
+    qemu_free(s);
+
     return NULL;
 }
 
diff --git a/qemu-char.h b/qemu-char.h
index bcc0766..32bcfd7 100644
--- a/qemu-char.h
+++ b/qemu-char.h
@@ -15,6 +15,7 @@ 
 #define CHR_EVENT_MUX_IN  3 /* mux-focus was set to this terminal */
 #define CHR_EVENT_MUX_OUT 4 /* mux-focus will move on */
 #define CHR_EVENT_CLOSED  5 /* connection closed */
+#define CHR_EVENT_RECONNECTED  6 /* reconnect event */
 
 
 #define CHR_IOCTL_SERIAL_SET_PARAMS   1
@@ -75,6 +76,7 @@  CharDriverState *qemu_chr_open_opts(QemuOpts *opts,
                                     void (*init)(struct CharDriverState *s));
 CharDriverState *qemu_chr_open(const char *label, const char *filename, void (*init)(struct CharDriverState *s));
 void qemu_chr_close(CharDriverState *chr);
+void qemu_chr_reconnect(void);
 void qemu_chr_printf(CharDriverState *s, const char *fmt, ...);
 int qemu_chr_write(CharDriverState *s, const uint8_t *buf, int len);
 void qemu_chr_send_event(CharDriverState *s, int event);
diff --git a/qemu-config.c b/qemu-config.c
index c3203c8..a229350 100644
--- a/qemu-config.c
+++ b/qemu-config.c
@@ -144,6 +144,9 @@  QemuOptsList qemu_chardev_opts = {
         },{
             .name = "signal",
             .type = QEMU_OPT_BOOL,
+        },{
+            .name = "reconnect",
+            .type = QEMU_OPT_NUMBER,
         },
         { /* end if list */ }
     },
diff --git a/vl.c b/vl.c
index 6f1e1ab..bcd7d44 100644
--- a/vl.c
+++ b/vl.c
@@ -3719,6 +3719,10 @@  void main_loop_wait(int timeout)
 
     host_main_loop_wait(&timeout);
 
+    /* Reconnect any disconnected sockets, if necessary */
+
+    qemu_chr_reconnect();
+
     /* poll any events */
     /* XXX: separate device handlers from system ones */
     nfds = -1;