Message ID | 20110909120844.13697.48102.stgit@localhost6.localdomain6 |
---|---|
State | Not Applicable, archived |
Delegated to: | David Miller |
Headers | show |
On Fri, 09 Sep 2011 16:08:44 +0400 Stanislav Kinsbursky <skinsbursky@parallels.com> wrote: > Create rcbind clients or increase rpcbind users counter during RPC service > creation and decrease this counter (and possibly destroy those clients) on RPC > service destruction. > > Signed-off-by: Stanislav Kinsbursky <skinsbursky@parallels.com> > > --- > include/linux/sunrpc/clnt.h | 2 ++ > net/sunrpc/rpcb_clnt.c | 2 +- > net/sunrpc/svc.c | 13 +++++++++++-- > 3 files changed, 14 insertions(+), 3 deletions(-) > > diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h > index db7bcaf..65a8115 100644 > --- a/include/linux/sunrpc/clnt.h > +++ b/include/linux/sunrpc/clnt.h > @@ -135,10 +135,12 @@ void rpc_shutdown_client(struct rpc_clnt *); > void rpc_release_client(struct rpc_clnt *); > void rpc_task_release_client(struct rpc_task *); > > +int rpcb_create_local(void); > int rpcb_register(u32, u32, int, unsigned short); > int rpcb_v4_register(const u32 program, const u32 version, > const struct sockaddr *address, > const char *netid); > +void rpcb_put_local(void); > void rpcb_getport_async(struct rpc_task *); > > void rpc_call_start(struct rpc_task *); > diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c > index b4cc0f1..437ec60 100644 > --- a/net/sunrpc/rpcb_clnt.c > +++ b/net/sunrpc/rpcb_clnt.c > @@ -318,7 +318,7 @@ out: > * Returns zero on success, otherwise a negative errno value > * is returned. 
> */ > -static int rpcb_create_local(void) > +int rpcb_create_local(void) > { > static DEFINE_MUTEX(rpcb_create_local_mutex); > int result = 0; > diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c > index 6a69a11..9095c0e 100644 > --- a/net/sunrpc/svc.c > +++ b/net/sunrpc/svc.c > @@ -367,8 +367,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > unsigned int xdrsize; > unsigned int i; > > - if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) > + if (rpcb_create_local() < 0) > return NULL; > + > + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) > + goto out_err; > serv->sv_name = prog->pg_name; > serv->sv_program = prog; > serv->sv_nrthreads = 1; > @@ -403,7 +406,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > GFP_KERNEL); > if (!serv->sv_pools) { > kfree(serv); > - return NULL; > + goto out_err; > } > > for (i = 0; i < serv->sv_nrpools; i++) { > @@ -423,6 +426,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > svc_unregister(serv); > > return serv; > + > +out_err: > + rpcb_put_local(); > + return NULL; > } > > struct svc_serv * > @@ -491,6 +498,8 @@ svc_destroy(struct svc_serv *serv) > svc_unregister(serv); > kfree(serv->sv_pools); > kfree(serv); > + > + rpcb_put_local(); > } > EXPORT_SYMBOL_GPL(svc_destroy); > > I don't get it -- what's the advantage of creating rpcbind clients in __svc_create vs. the old way of creating them just before we plan to use them? With this scheme, won't we end up creating rpcbind sockets even when we don't need them? For instance, if I create a callback socket for NFSv4 then I don't really need to talk to rpcbind. With this patch I'll still get the rpcbind clients created though. It would seem to me to make more sense to create the rpcbind clients somewhere closer to svc_setup_socket, and only if SVC_SOCK_ANONYMOUS is not set.
09.09.2011 18:07, Jeff Layton пишет: > On Fri, 09 Sep 2011 16:08:44 +0400 > Stanislav Kinsbursky<skinsbursky@parallels.com> wrote: > >> Create rcbind clients or increase rpcbind users counter during RPC service >> creation and decrease this counter (and possibly destroy those clients) on RPC >> service destruction. >> >> Signed-off-by: Stanislav Kinsbursky<skinsbursky@parallels.com> >> >> --- >> include/linux/sunrpc/clnt.h | 2 ++ >> net/sunrpc/rpcb_clnt.c | 2 +- >> net/sunrpc/svc.c | 13 +++++++++++-- >> 3 files changed, 14 insertions(+), 3 deletions(-) >> >> diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h >> index db7bcaf..65a8115 100644 >> --- a/include/linux/sunrpc/clnt.h >> +++ b/include/linux/sunrpc/clnt.h >> @@ -135,10 +135,12 @@ void rpc_shutdown_client(struct rpc_clnt *); >> void rpc_release_client(struct rpc_clnt *); >> void rpc_task_release_client(struct rpc_task *); >> >> +int rpcb_create_local(void); >> int rpcb_register(u32, u32, int, unsigned short); >> int rpcb_v4_register(const u32 program, const u32 version, >> const struct sockaddr *address, >> const char *netid); >> +void rpcb_put_local(void); >> void rpcb_getport_async(struct rpc_task *); >> >> void rpc_call_start(struct rpc_task *); >> diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c >> index b4cc0f1..437ec60 100644 >> --- a/net/sunrpc/rpcb_clnt.c >> +++ b/net/sunrpc/rpcb_clnt.c >> @@ -318,7 +318,7 @@ out: >> * Returns zero on success, otherwise a negative errno value >> * is returned. 
>> */ >> -static int rpcb_create_local(void) >> +int rpcb_create_local(void) >> { >> static DEFINE_MUTEX(rpcb_create_local_mutex); >> int result = 0; >> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c >> index 6a69a11..9095c0e 100644 >> --- a/net/sunrpc/svc.c >> +++ b/net/sunrpc/svc.c >> @@ -367,8 +367,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, >> unsigned int xdrsize; >> unsigned int i; >> >> - if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) >> + if (rpcb_create_local()< 0) >> return NULL; >> + >> + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) >> + goto out_err; >> serv->sv_name = prog->pg_name; >> serv->sv_program = prog; >> serv->sv_nrthreads = 1; >> @@ -403,7 +406,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, >> GFP_KERNEL); >> if (!serv->sv_pools) { >> kfree(serv); >> - return NULL; >> + goto out_err; >> } >> >> for (i = 0; i< serv->sv_nrpools; i++) { >> @@ -423,6 +426,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, >> svc_unregister(serv); >> >> return serv; >> + >> +out_err: >> + rpcb_put_local(); >> + return NULL; >> } >> >> struct svc_serv * >> @@ -491,6 +498,8 @@ svc_destroy(struct svc_serv *serv) >> svc_unregister(serv); >> kfree(serv->sv_pools); >> kfree(serv); >> + >> + rpcb_put_local(); >> } >> EXPORT_SYMBOL_GPL(svc_destroy); >> >> > > I don't get it -- what's the advantage of creating rpcbind clients in > __svc_create vs. the old way of creating them just before we plan to > use them? > The main problem here is not in creation, but in destroying those clients. Now rpcbind clients are created during rpcb_register(). I.e. once per every family, program version and so on. But can be unregistered for all protocol families by one call. So it's impossible to put reference counting for those clients in the place, where they are created now. > With this scheme, won't we end up creating rpcbind sockets even when we > don't need them? 
For instance, if I create a callback socket for NFSv4 > then I don't really need to talk to rpcbind. With this patch I'll still > get the rpcbind clients created though. > Yep, you're right. This is not a real problem from my point of view. But I'll think about how to avoid this creation for nfs callbacks. > It would seem to me to make more sense to create the rpcbind clients > somewhere closer to svc_setup_socket, and only if SVC_SOCK_ANONYMOUS is > not set. > Probably. Will try to find a better place.
On Fri, 2011-09-09 at 20:41 +0400, Stanislav Kinsbursky wrote: > 09.09.2011 18:07, Jeff Layton пишет: > > On Fri, 09 Sep 2011 16:08:44 +0400 > > Stanislav Kinsbursky<skinsbursky@parallels.com> wrote: > > > >> Create rcbind clients or increase rpcbind users counter during RPC service > >> creation and decrease this counter (and possibly destroy those clients) on RPC > >> service destruction. > >> > >> Signed-off-by: Stanislav Kinsbursky<skinsbursky@parallels.com> > >> > >> --- > >> include/linux/sunrpc/clnt.h | 2 ++ > >> net/sunrpc/rpcb_clnt.c | 2 +- > >> net/sunrpc/svc.c | 13 +++++++++++-- > >> 3 files changed, 14 insertions(+), 3 deletions(-) > >> > >> diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h > >> index db7bcaf..65a8115 100644 > >> --- a/include/linux/sunrpc/clnt.h > >> +++ b/include/linux/sunrpc/clnt.h > >> @@ -135,10 +135,12 @@ void rpc_shutdown_client(struct rpc_clnt *); > >> void rpc_release_client(struct rpc_clnt *); > >> void rpc_task_release_client(struct rpc_task *); > >> > >> +int rpcb_create_local(void); > >> int rpcb_register(u32, u32, int, unsigned short); > >> int rpcb_v4_register(const u32 program, const u32 version, > >> const struct sockaddr *address, > >> const char *netid); > >> +void rpcb_put_local(void); > >> void rpcb_getport_async(struct rpc_task *); > >> > >> void rpc_call_start(struct rpc_task *); > >> diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c > >> index b4cc0f1..437ec60 100644 > >> --- a/net/sunrpc/rpcb_clnt.c > >> +++ b/net/sunrpc/rpcb_clnt.c > >> @@ -318,7 +318,7 @@ out: > >> * Returns zero on success, otherwise a negative errno value > >> * is returned. 
> >> */ > >> -static int rpcb_create_local(void) > >> +int rpcb_create_local(void) > >> { > >> static DEFINE_MUTEX(rpcb_create_local_mutex); > >> int result = 0; > >> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c > >> index 6a69a11..9095c0e 100644 > >> --- a/net/sunrpc/svc.c > >> +++ b/net/sunrpc/svc.c > >> @@ -367,8 +367,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > >> unsigned int xdrsize; > >> unsigned int i; > >> > >> - if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) > >> + if (rpcb_create_local()< 0) > >> return NULL; > >> + > >> + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) > >> + goto out_err; > >> serv->sv_name = prog->pg_name; > >> serv->sv_program = prog; > >> serv->sv_nrthreads = 1; > >> @@ -403,7 +406,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > >> GFP_KERNEL); > >> if (!serv->sv_pools) { > >> kfree(serv); > >> - return NULL; > >> + goto out_err; > >> } > >> > >> for (i = 0; i< serv->sv_nrpools; i++) { > >> @@ -423,6 +426,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > >> svc_unregister(serv); > >> > >> return serv; > >> + > >> +out_err: > >> + rpcb_put_local(); > >> + return NULL; > >> } > >> > >> struct svc_serv * > >> @@ -491,6 +498,8 @@ svc_destroy(struct svc_serv *serv) > >> svc_unregister(serv); > >> kfree(serv->sv_pools); > >> kfree(serv); > >> + > >> + rpcb_put_local(); > >> } > >> EXPORT_SYMBOL_GPL(svc_destroy); > >> > >> > > > > I don't get it -- what's the advantage of creating rpcbind clients in > > __svc_create vs. the old way of creating them just before we plan to > > use them? > > > > The main problem here is not in creation, but in destroying those clients. > Now rpcbind clients are created during rpcb_register(). I.e. once per every family, program version and so on. > But can be unregistered for all protocol families by one call. 
So it's impossible to put reference counting for those clients in the place where they are created now. Could we perhaps set up a 'struct pernet_operations' to create a destructor for them? Cheers Trond
On Fri, 09 Sep 2011 14:44:34 -0400 Trond Myklebust <Trond.Myklebust@netapp.com> wrote: > On Fri, 2011-09-09 at 20:41 +0400, Stanislav Kinsbursky wrote: > > 09.09.2011 18:07, Jeff Layton пишет: > > > On Fri, 09 Sep 2011 16:08:44 +0400 > > > Stanislav Kinsbursky<skinsbursky@parallels.com> wrote: > > > > > >> Create rcbind clients or increase rpcbind users counter during RPC service > > >> creation and decrease this counter (and possibly destroy those clients) on RPC > > >> service destruction. > > >> > > >> Signed-off-by: Stanislav Kinsbursky<skinsbursky@parallels.com> > > >> > > >> --- > > >> include/linux/sunrpc/clnt.h | 2 ++ > > >> net/sunrpc/rpcb_clnt.c | 2 +- > > >> net/sunrpc/svc.c | 13 +++++++++++-- > > >> 3 files changed, 14 insertions(+), 3 deletions(-) > > >> > > >> diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h > > >> index db7bcaf..65a8115 100644 > > >> --- a/include/linux/sunrpc/clnt.h > > >> +++ b/include/linux/sunrpc/clnt.h > > >> @@ -135,10 +135,12 @@ void rpc_shutdown_client(struct rpc_clnt *); > > >> void rpc_release_client(struct rpc_clnt *); > > >> void rpc_task_release_client(struct rpc_task *); > > >> > > >> +int rpcb_create_local(void); > > >> int rpcb_register(u32, u32, int, unsigned short); > > >> int rpcb_v4_register(const u32 program, const u32 version, > > >> const struct sockaddr *address, > > >> const char *netid); > > >> +void rpcb_put_local(void); > > >> void rpcb_getport_async(struct rpc_task *); > > >> > > >> void rpc_call_start(struct rpc_task *); > > >> diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c > > >> index b4cc0f1..437ec60 100644 > > >> --- a/net/sunrpc/rpcb_clnt.c > > >> +++ b/net/sunrpc/rpcb_clnt.c > > >> @@ -318,7 +318,7 @@ out: > > >> * Returns zero on success, otherwise a negative errno value > > >> * is returned. 
> > >> */ > > >> -static int rpcb_create_local(void) > > >> +int rpcb_create_local(void) > > >> { > > >> static DEFINE_MUTEX(rpcb_create_local_mutex); > > >> int result = 0; > > >> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c > > >> index 6a69a11..9095c0e 100644 > > >> --- a/net/sunrpc/svc.c > > >> +++ b/net/sunrpc/svc.c > > >> @@ -367,8 +367,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > > >> unsigned int xdrsize; > > >> unsigned int i; > > >> > > >> - if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) > > >> + if (rpcb_create_local()< 0) > > >> return NULL; > > >> + > > >> + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) > > >> + goto out_err; > > >> serv->sv_name = prog->pg_name; > > >> serv->sv_program = prog; > > >> serv->sv_nrthreads = 1; > > >> @@ -403,7 +406,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > > >> GFP_KERNEL); > > >> if (!serv->sv_pools) { > > >> kfree(serv); > > >> - return NULL; > > >> + goto out_err; > > >> } > > >> > > >> for (i = 0; i< serv->sv_nrpools; i++) { > > >> @@ -423,6 +426,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > > >> svc_unregister(serv); > > >> > > >> return serv; > > >> + > > >> +out_err: > > >> + rpcb_put_local(); > > >> + return NULL; > > >> } > > >> > > >> struct svc_serv * > > >> @@ -491,6 +498,8 @@ svc_destroy(struct svc_serv *serv) > > >> svc_unregister(serv); > > >> kfree(serv->sv_pools); > > >> kfree(serv); > > >> + > > >> + rpcb_put_local(); > > >> } > > >> EXPORT_SYMBOL_GPL(svc_destroy); > > >> > > >> > > > > > > I don't get it -- what's the advantage of creating rpcbind clients in > > > __svc_create vs. the old way of creating them just before we plan to > > > use them? > > > > > > > The main problem here is not in creation, but in destroying those clients. > > Now rpcbind clients are created during rpcb_register(). I.e. once per every family, program version and so on. 
> > But can be unregistered for all protocol families by one call. So it's impossible to put reference counting for those clients in the place, where they are created now. > > Could we perhaps set up a 'struct pernet_operations' to create a > destructor for them? > An even easier idea might be to just not take a reference to the rpcbind client for svc_programs that have vs_hidden set on every version.
On Fri, 2011-09-09 at 15:01 -0400, Jeff Layton wrote: > On Fri, 09 Sep 2011 14:44:34 -0400 > Trond Myklebust <Trond.Myklebust@netapp.com> wrote: > > > On Fri, 2011-09-09 at 20:41 +0400, Stanislav Kinsbursky wrote: > > > 09.09.2011 18:07, Jeff Layton пишет: > > > > On Fri, 09 Sep 2011 16:08:44 +0400 > > > > Stanislav Kinsbursky<skinsbursky@parallels.com> wrote: > > > > > > > >> Create rcbind clients or increase rpcbind users counter during RPC service > > > >> creation and decrease this counter (and possibly destroy those clients) on RPC > > > >> service destruction. > > > >> > > > >> Signed-off-by: Stanislav Kinsbursky<skinsbursky@parallels.com> > > > >> > > > >> --- > > > >> include/linux/sunrpc/clnt.h | 2 ++ > > > >> net/sunrpc/rpcb_clnt.c | 2 +- > > > >> net/sunrpc/svc.c | 13 +++++++++++-- > > > >> 3 files changed, 14 insertions(+), 3 deletions(-) > > > >> > > > >> diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h > > > >> index db7bcaf..65a8115 100644 > > > >> --- a/include/linux/sunrpc/clnt.h > > > >> +++ b/include/linux/sunrpc/clnt.h > > > >> @@ -135,10 +135,12 @@ void rpc_shutdown_client(struct rpc_clnt *); > > > >> void rpc_release_client(struct rpc_clnt *); > > > >> void rpc_task_release_client(struct rpc_task *); > > > >> > > > >> +int rpcb_create_local(void); > > > >> int rpcb_register(u32, u32, int, unsigned short); > > > >> int rpcb_v4_register(const u32 program, const u32 version, > > > >> const struct sockaddr *address, > > > >> const char *netid); > > > >> +void rpcb_put_local(void); > > > >> void rpcb_getport_async(struct rpc_task *); > > > >> > > > >> void rpc_call_start(struct rpc_task *); > > > >> diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c > > > >> index b4cc0f1..437ec60 100644 > > > >> --- a/net/sunrpc/rpcb_clnt.c > > > >> +++ b/net/sunrpc/rpcb_clnt.c > > > >> @@ -318,7 +318,7 @@ out: > > > >> * Returns zero on success, otherwise a negative errno value > > > >> * is returned. 
> > > >> */ > > > >> -static int rpcb_create_local(void) > > > >> +int rpcb_create_local(void) > > > >> { > > > >> static DEFINE_MUTEX(rpcb_create_local_mutex); > > > >> int result = 0; > > > >> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c > > > >> index 6a69a11..9095c0e 100644 > > > >> --- a/net/sunrpc/svc.c > > > >> +++ b/net/sunrpc/svc.c > > > >> @@ -367,8 +367,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > > > >> unsigned int xdrsize; > > > >> unsigned int i; > > > >> > > > >> - if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) > > > >> + if (rpcb_create_local()< 0) > > > >> return NULL; > > > >> + > > > >> + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) > > > >> + goto out_err; > > > >> serv->sv_name = prog->pg_name; > > > >> serv->sv_program = prog; > > > >> serv->sv_nrthreads = 1; > > > >> @@ -403,7 +406,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > > > >> GFP_KERNEL); > > > >> if (!serv->sv_pools) { > > > >> kfree(serv); > > > >> - return NULL; > > > >> + goto out_err; > > > >> } > > > >> > > > >> for (i = 0; i< serv->sv_nrpools; i++) { > > > >> @@ -423,6 +426,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > > > >> svc_unregister(serv); > > > >> > > > >> return serv; > > > >> + > > > >> +out_err: > > > >> + rpcb_put_local(); > > > >> + return NULL; > > > >> } > > > >> > > > >> struct svc_serv * > > > >> @@ -491,6 +498,8 @@ svc_destroy(struct svc_serv *serv) > > > >> svc_unregister(serv); > > > >> kfree(serv->sv_pools); > > > >> kfree(serv); > > > >> + > > > >> + rpcb_put_local(); > > > >> } > > > >> EXPORT_SYMBOL_GPL(svc_destroy); > > > >> > > > >> > > > > > > > > I don't get it -- what's the advantage of creating rpcbind clients in > > > > __svc_create vs. the old way of creating them just before we plan to > > > > use them? > > > > > > > > > > The main problem here is not in creation, but in destroying those clients. 
> > > Now rpcbind clients are created during rpcb_register(). I.e. once per every family, program version and so on. > > > But can be unregistered for all protocol families by one call. So it's impossible to put reference counting for those clients in the place, where they are created now. > > > > Could we perhaps set up a 'struct pernet_operations' to create a > > destructor for them? > > > > An even easier idea might be to just not take a reference to the > rpcbind client for svc_programs that have vs_hidden set on every > version. Isn't the problem that Stanislav is trying to solve that we need to be able to register and unregister RPC services to the correct rpcbind server, depending on which net namespace we are in? My understanding is that the current code will register everything to whatever rpcbind server is running in the init net namespace because that's what rpcb_create_local() uses. My suggestion is to use a struct pernet_operations to detect when a net namespace is being created or destroyed, so that the rpcbind client code knows when to create or destroy a connection to the server that is running in that namespace. Cheers Trond
> Isn't the problem that Stanislav is trying to solve that we need to be > able to register and unregister RPC services to the correct rpcbind > server, depending on which net namespace we are in? > > My understanding is that the current code will register everything to > whatever rpcbind server is running in the init net namespace because > that's what rpcb_create_local() uses. > > My suggestion is to use a struct pernet_operations to detect when a net > namespace is being created or destroyed, so that the rpcbind client code > knows when to create or destroy a connection to the server that is > running in that namespace. The problem is that the client has a reference on transport, the transport gets a socket and the socket holds the net namespace and thus we will not receive the net->exit event with this approach. Another option is to break the client's socket reference and kill the rpcb client on netns stop (i.e. as you propose), but in that case we have another problem - how to destroy _other_ clients (not rpc bind ones) when the net namespace stops. If you prefer either way, just let us know, we'll fix the patches. > Cheers > Trond -- To unsubscribe from this list: send the line "unsubscribe netdev" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
09.09.2011 23:25, Trond Myklebust пишет: > On Fri, 2011-09-09 at 15:01 -0400, Jeff Layton wrote: >> On Fri, 09 Sep 2011 14:44:34 -0400 >> Trond Myklebust<Trond.Myklebust@netapp.com> wrote: >> >>> On Fri, 2011-09-09 at 20:41 +0400, Stanislav Kinsbursky wrote: >>>> 09.09.2011 18:07, Jeff Layton пишет: >>>>> On Fri, 09 Sep 2011 16:08:44 +0400 >>>>> Stanislav Kinsbursky<skinsbursky@parallels.com> wrote: >>>>> >>>>>> Create rcbind clients or increase rpcbind users counter during RPC service >>>>>> creation and decrease this counter (and possibly destroy those clients) on RPC >>>>>> service destruction. >>>>>> >>>>>> Signed-off-by: Stanislav Kinsbursky<skinsbursky@parallels.com> >>>>>> >>>>>> --- >>>>>> include/linux/sunrpc/clnt.h | 2 ++ >>>>>> net/sunrpc/rpcb_clnt.c | 2 +- >>>>>> net/sunrpc/svc.c | 13 +++++++++++-- >>>>>> 3 files changed, 14 insertions(+), 3 deletions(-) >>>>>> >>>>>> diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h >>>>>> index db7bcaf..65a8115 100644 >>>>>> --- a/include/linux/sunrpc/clnt.h >>>>>> +++ b/include/linux/sunrpc/clnt.h >>>>>> @@ -135,10 +135,12 @@ void rpc_shutdown_client(struct rpc_clnt *); >>>>>> void rpc_release_client(struct rpc_clnt *); >>>>>> void rpc_task_release_client(struct rpc_task *); >>>>>> >>>>>> +int rpcb_create_local(void); >>>>>> int rpcb_register(u32, u32, int, unsigned short); >>>>>> int rpcb_v4_register(const u32 program, const u32 version, >>>>>> const struct sockaddr *address, >>>>>> const char *netid); >>>>>> +void rpcb_put_local(void); >>>>>> void rpcb_getport_async(struct rpc_task *); >>>>>> >>>>>> void rpc_call_start(struct rpc_task *); >>>>>> diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c >>>>>> index b4cc0f1..437ec60 100644 >>>>>> --- a/net/sunrpc/rpcb_clnt.c >>>>>> +++ b/net/sunrpc/rpcb_clnt.c >>>>>> @@ -318,7 +318,7 @@ out: >>>>>> * Returns zero on success, otherwise a negative errno value >>>>>> * is returned. 
>>>>>> */ >>>>>> -static int rpcb_create_local(void) >>>>>> +int rpcb_create_local(void) >>>>>> { >>>>>> static DEFINE_MUTEX(rpcb_create_local_mutex); >>>>>> int result = 0; >>>>>> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c >>>>>> index 6a69a11..9095c0e 100644 >>>>>> --- a/net/sunrpc/svc.c >>>>>> +++ b/net/sunrpc/svc.c >>>>>> @@ -367,8 +367,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, >>>>>> unsigned int xdrsize; >>>>>> unsigned int i; >>>>>> >>>>>> - if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) >>>>>> + if (rpcb_create_local()< 0) >>>>>> return NULL; >>>>>> + >>>>>> + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) >>>>>> + goto out_err; >>>>>> serv->sv_name = prog->pg_name; >>>>>> serv->sv_program = prog; >>>>>> serv->sv_nrthreads = 1; >>>>>> @@ -403,7 +406,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, >>>>>> GFP_KERNEL); >>>>>> if (!serv->sv_pools) { >>>>>> kfree(serv); >>>>>> - return NULL; >>>>>> + goto out_err; >>>>>> } >>>>>> >>>>>> for (i = 0; i< serv->sv_nrpools; i++) { >>>>>> @@ -423,6 +426,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, >>>>>> svc_unregister(serv); >>>>>> >>>>>> return serv; >>>>>> + >>>>>> +out_err: >>>>>> + rpcb_put_local(); >>>>>> + return NULL; >>>>>> } >>>>>> >>>>>> struct svc_serv * >>>>>> @@ -491,6 +498,8 @@ svc_destroy(struct svc_serv *serv) >>>>>> svc_unregister(serv); >>>>>> kfree(serv->sv_pools); >>>>>> kfree(serv); >>>>>> + >>>>>> + rpcb_put_local(); >>>>>> } >>>>>> EXPORT_SYMBOL_GPL(svc_destroy); >>>>>> >>>>>> >>>>> >>>>> I don't get it -- what's the advantage of creating rpcbind clients in >>>>> __svc_create vs. the old way of creating them just before we plan to >>>>> use them? >>>>> >>>> >>>> The main problem here is not in creation, but in destroying those clients. >>>> Now rpcbind clients are created during rpcb_register(). I.e. once per every family, program version and so on. 
>>>> But can be unregistered for all protocol families by one call. So it's impossible to put reference counting for those clients in the place, where they are created now. >>> >>> Could we perhaps set up a 'struct pernet_operations' to create a >>> destructor for them? >>> >> >> An even easier idea might be to just not take a reference to the >> rpcbind client for svc_programs that have vs_hidden set on every >> version. > > Isn't the problem that Stanislav is trying to solve that we need to be > able to register and unregister RPC services to the correct rpcbind > server, depending on which net namespace we are in? > Yes, it is. I'm going to make rpcbind clients per net namespace. > My understanding is that the current code will register everything to > whatever rpcbind server is running in the init net namespace because > that's what rpcb_create_local() uses. > > My suggestion is to use a struct pernet_operations to detect when a net > namespace is being created or destroyed, so that the rpcbind client code > knows when to create or destroy a connection to the server that is > running in that namespace. > But as Pavel mentioned already, we can't use netns destructor for rpcbind clients because they holds netns reference. That's why we have to untie netns from rpbind clients first. Another solution is to not increment netns ref counter for rpcbind sockets as, again, Pavel already mentioned. But first approach looks clearer from my pow. That's why I'm trying to make rcpbind client's self-destructible.After achieving this we can just make this rpcbind clients per netns and then we can virtualize lockd. I've tried to find some better place for creating rpcbind clients (instead of __svc_create()). But this place looks like the best one for current solution. About avoiding of creation of rpcbind clients for nfs 4.* callbacks. 
Probably, we can implement init-fini calls for svc_program structure (rpcb_create_local() and rpcb_put_local() will be used in our case) and then inherit them for svc_serv. This will allow us to call these hooks only if they are defined and thus avoid rpcbind client creation for nfs callbacks. What do all of you think about this hooks idea? > Cheers > Trond
12.09.2011 15:22, Stanislav Kinsbursky пишет: > 09.09.2011 23:25, Trond Myklebust пишет: >> On Fri, 2011-09-09 at 15:01 -0400, Jeff Layton wrote: >>> On Fri, 09 Sep 2011 14:44:34 -0400 >>> Trond Myklebust<Trond.Myklebust@netapp.com> wrote: >>> >>>> On Fri, 2011-09-09 at 20:41 +0400, Stanislav Kinsbursky wrote: >>>>> 09.09.2011 18:07, Jeff Layton пишет: >>>>>> On Fri, 09 Sep 2011 16:08:44 +0400 >>>>>> Stanislav Kinsbursky<skinsbursky@parallels.com> wrote: >>>>>> >>>>>>> Create rcbind clients or increase rpcbind users counter during RPC service >>>>>>> creation and decrease this counter (and possibly destroy those clients) on RPC >>>>>>> service destruction. >>>>>>> >>>>>>> Signed-off-by: Stanislav Kinsbursky<skinsbursky@parallels.com> >>>>>>> >>>>>>> --- >>>>>>> include/linux/sunrpc/clnt.h | 2 ++ >>>>>>> net/sunrpc/rpcb_clnt.c | 2 +- >>>>>>> net/sunrpc/svc.c | 13 +++++++++++-- >>>>>>> 3 files changed, 14 insertions(+), 3 deletions(-) >>>>>>> >>>>>>> diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h >>>>>>> index db7bcaf..65a8115 100644 >>>>>>> --- a/include/linux/sunrpc/clnt.h >>>>>>> +++ b/include/linux/sunrpc/clnt.h >>>>>>> @@ -135,10 +135,12 @@ void rpc_shutdown_client(struct rpc_clnt *); >>>>>>> void rpc_release_client(struct rpc_clnt *); >>>>>>> void rpc_task_release_client(struct rpc_task *); >>>>>>> >>>>>>> +int rpcb_create_local(void); >>>>>>> int rpcb_register(u32, u32, int, unsigned short); >>>>>>> int rpcb_v4_register(const u32 program, const u32 version, >>>>>>> const struct sockaddr *address, >>>>>>> const char *netid); >>>>>>> +void rpcb_put_local(void); >>>>>>> void rpcb_getport_async(struct rpc_task *); >>>>>>> >>>>>>> void rpc_call_start(struct rpc_task *); >>>>>>> diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c >>>>>>> index b4cc0f1..437ec60 100644 >>>>>>> --- a/net/sunrpc/rpcb_clnt.c >>>>>>> +++ b/net/sunrpc/rpcb_clnt.c >>>>>>> @@ -318,7 +318,7 @@ out: >>>>>>> * Returns zero on success, otherwise a negative errno 
value >>>>>>> * is returned. >>>>>>> */ >>>>>>> -static int rpcb_create_local(void) >>>>>>> +int rpcb_create_local(void) >>>>>>> { >>>>>>> static DEFINE_MUTEX(rpcb_create_local_mutex); >>>>>>> int result = 0; >>>>>>> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c >>>>>>> index 6a69a11..9095c0e 100644 >>>>>>> --- a/net/sunrpc/svc.c >>>>>>> +++ b/net/sunrpc/svc.c >>>>>>> @@ -367,8 +367,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, >>>>>>> unsigned int xdrsize; >>>>>>> unsigned int i; >>>>>>> >>>>>>> - if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) >>>>>>> + if (rpcb_create_local()< 0) >>>>>>> return NULL; >>>>>>> + >>>>>>> + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) >>>>>>> + goto out_err; >>>>>>> serv->sv_name = prog->pg_name; >>>>>>> serv->sv_program = prog; >>>>>>> serv->sv_nrthreads = 1; >>>>>>> @@ -403,7 +406,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, >>>>>>> GFP_KERNEL); >>>>>>> if (!serv->sv_pools) { >>>>>>> kfree(serv); >>>>>>> - return NULL; >>>>>>> + goto out_err; >>>>>>> } >>>>>>> >>>>>>> for (i = 0; i< serv->sv_nrpools; i++) { >>>>>>> @@ -423,6 +426,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, >>>>>>> svc_unregister(serv); >>>>>>> >>>>>>> return serv; >>>>>>> + >>>>>>> +out_err: >>>>>>> + rpcb_put_local(); >>>>>>> + return NULL; >>>>>>> } >>>>>>> >>>>>>> struct svc_serv * >>>>>>> @@ -491,6 +498,8 @@ svc_destroy(struct svc_serv *serv) >>>>>>> svc_unregister(serv); >>>>>>> kfree(serv->sv_pools); >>>>>>> kfree(serv); >>>>>>> + >>>>>>> + rpcb_put_local(); >>>>>>> } >>>>>>> EXPORT_SYMBOL_GPL(svc_destroy); >>>>>>> >>>>>>> >>>>>> >>>>>> I don't get it -- what's the advantage of creating rpcbind clients in >>>>>> __svc_create vs. the old way of creating them just before we plan to >>>>>> use them? >>>>>> >>>>> >>>>> The main problem here is not in creation, but in destroying those clients. >>>>> Now rpcbind clients are created during rpcb_register(). 
I.e. once per every family, program version and so on. >>>>> But can be unregistered for all protocol families by one call. So it's impossible to put reference counting for those clients in the place, where they are created now. >>>> >>>> Could we perhaps set up a 'struct pernet_operations' to create a >>>> destructor for them? >>>> >>> >>> An even easier idea might be to just not take a reference to the >>> rpcbind client for svc_programs that have vs_hidden set on every >>> version. >> >> Isn't the problem that Stanislav is trying to solve that we need to be >> able to register and unregister RPC services to the correct rpcbind >> server, depending on which net namespace we are in? >> > > Yes, it is. > I'm going to make rpcbind clients per net namespace. > >> My understanding is that the current code will register everything to >> whatever rpcbind server is running in the init net namespace because >> that's what rpcb_create_local() uses. >> >> My suggestion is to use a struct pernet_operations to detect when a net >> namespace is being created or destroyed, so that the rpcbind client code >> knows when to create or destroy a connection to the server that is >> running in that namespace. >> > > But as Pavel mentioned already, we can't use netns destructor for rpcbind > clients because they holds netns reference. > That's why we have to untie netns from rpbind clients first. > Another solution is to not increment netns ref counter for rpcbind sockets as, > again, Pavel already mentioned. > But first approach looks clearer from my pow. That's why I'm trying to make > rcpbind client's self-destructible.After achieving this we can just make this > rpcbind clients per netns and then we can virtualize lockd. > > I've tried to find some better place for creating rpcbind clients (instead of > __svc_create()). But this place looks like the best one for current solution. > > About avoiding of creation of rpcbind clients for nfs 4.* callbacks. 
> Probably, we can implement init-fini calls for the svc_program structure > (rpcb_create_local() and rpcb_put_local() will be used in our case) and then > inherit them for svc_serv. This will allow us to call these hooks only if they are > defined and thus avoid rpcbind client creation for nfs callbacks. > > What do all of you think about this hook idea? > Since we already have the sv_shutdown callback, we can use it to put rpcbind clients. Creation of rpcbind clients can be performed in lockd_up() and nfsd_create_serv() before calling svc_create(_pooled)(). >> Cheers >> Trond > >
On Fri, 09 Sep 2011 15:25:08 -0400 Trond Myklebust <Trond.Myklebust@netapp.com> wrote: > On Fri, 2011-09-09 at 15:01 -0400, Jeff Layton wrote: > > On Fri, 09 Sep 2011 14:44:34 -0400 > > Trond Myklebust <Trond.Myklebust@netapp.com> wrote: > > > > > On Fri, 2011-09-09 at 20:41 +0400, Stanislav Kinsbursky wrote: > > > > 09.09.2011 18:07, Jeff Layton пишет: > > > > > On Fri, 09 Sep 2011 16:08:44 +0400 > > > > > Stanislav Kinsbursky<skinsbursky@parallels.com> wrote: > > > > > > > > > >> Create rcbind clients or increase rpcbind users counter during RPC service > > > > >> creation and decrease this counter (and possibly destroy those clients) on RPC > > > > >> service destruction. > > > > >> > > > > >> Signed-off-by: Stanislav Kinsbursky<skinsbursky@parallels.com> > > > > >> > > > > >> --- > > > > >> include/linux/sunrpc/clnt.h | 2 ++ > > > > >> net/sunrpc/rpcb_clnt.c | 2 +- > > > > >> net/sunrpc/svc.c | 13 +++++++++++-- > > > > >> 3 files changed, 14 insertions(+), 3 deletions(-) > > > > >> > > > > >> diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h > > > > >> index db7bcaf..65a8115 100644 > > > > >> --- a/include/linux/sunrpc/clnt.h > > > > >> +++ b/include/linux/sunrpc/clnt.h > > > > >> @@ -135,10 +135,12 @@ void rpc_shutdown_client(struct rpc_clnt *); > > > > >> void rpc_release_client(struct rpc_clnt *); > > > > >> void rpc_task_release_client(struct rpc_task *); > > > > >> > > > > >> +int rpcb_create_local(void); > > > > >> int rpcb_register(u32, u32, int, unsigned short); > > > > >> int rpcb_v4_register(const u32 program, const u32 version, > > > > >> const struct sockaddr *address, > > > > >> const char *netid); > > > > >> +void rpcb_put_local(void); > > > > >> void rpcb_getport_async(struct rpc_task *); > > > > >> > > > > >> void rpc_call_start(struct rpc_task *); > > > > >> diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c > > > > >> index b4cc0f1..437ec60 100644 > > > > >> --- a/net/sunrpc/rpcb_clnt.c > > > > >> +++ 
b/net/sunrpc/rpcb_clnt.c > > > > >> @@ -318,7 +318,7 @@ out: > > > > >> * Returns zero on success, otherwise a negative errno value > > > > >> * is returned. > > > > >> */ > > > > >> -static int rpcb_create_local(void) > > > > >> +int rpcb_create_local(void) > > > > >> { > > > > >> static DEFINE_MUTEX(rpcb_create_local_mutex); > > > > >> int result = 0; > > > > >> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c > > > > >> index 6a69a11..9095c0e 100644 > > > > >> --- a/net/sunrpc/svc.c > > > > >> +++ b/net/sunrpc/svc.c > > > > >> @@ -367,8 +367,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > > > > >> unsigned int xdrsize; > > > > >> unsigned int i; > > > > >> > > > > >> - if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) > > > > >> + if (rpcb_create_local()< 0) > > > > >> return NULL; > > > > >> + > > > > >> + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) > > > > >> + goto out_err; > > > > >> serv->sv_name = prog->pg_name; > > > > >> serv->sv_program = prog; > > > > >> serv->sv_nrthreads = 1; > > > > >> @@ -403,7 +406,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > > > > >> GFP_KERNEL); > > > > >> if (!serv->sv_pools) { > > > > >> kfree(serv); > > > > >> - return NULL; > > > > >> + goto out_err; > > > > >> } > > > > >> > > > > >> for (i = 0; i< serv->sv_nrpools; i++) { > > > > >> @@ -423,6 +426,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, > > > > >> svc_unregister(serv); > > > > >> > > > > >> return serv; > > > > >> + > > > > >> +out_err: > > > > >> + rpcb_put_local(); > > > > >> + return NULL; > > > > >> } > > > > >> > > > > >> struct svc_serv * > > > > >> @@ -491,6 +498,8 @@ svc_destroy(struct svc_serv *serv) > > > > >> svc_unregister(serv); > > > > >> kfree(serv->sv_pools); > > > > >> kfree(serv); > > > > >> + > > > > >> + rpcb_put_local(); > > > > >> } > > > > >> EXPORT_SYMBOL_GPL(svc_destroy); > > > > >> > > > > >> > > > > > > > > > > I don't get it -- what's 
the advantage of creating rpcbind clients in > > > > > __svc_create vs. the old way of creating them just before we plan to > > > > > use them? > > > > > > > > > > > > > The main problem here is not in creation, but in destroying those clients. > > > > Now rpcbind clients are created during rpcb_register(). I.e. once per every family, program version and so on. > > > > But can be unregistered for all protocol families by one call. So it's impossible to put reference counting for those clients in the place, where they are created now. > > > > > > Could we perhaps set up a 'struct pernet_operations' to create a > > > destructor for them? > > > > > > > An even easier idea might be to just not take a reference to the > > rpcbind client for svc_programs that have vs_hidden set on every > > version. > > Isn't the problem that Stanislav is trying to solve that we need to be > able to register and unregister RPC services to the correct rpcbind > server, depending on which net namespace we are in? > > My understanding is that the current code will register everything to > whatever rpcbind server is running in the init net namespace because > that's what rpcb_create_local() uses. > My assumption in reading this set (maybe wrong) was that this was a preliminary set for now that just plops in function calls in the places that do this sort of thing now. I figured that eventually he'd convert rpcb_create_local, et. al. to do the same thing but within the correct namespace for the calling task. > My suggestion is to use a struct pernet_operations to detect when a net > namespace is being created or destroyed, so that the rpcbind client code > knows when to create or destroy a connection to the server that is > running in that namespace. > I'm not sure that solves anything. I'd also ass|u|me that something here (the xprt?) will hold a reference to its netns. If we try to do this with pernet ops, then I think we'd end up with a chicken-and-egg problem... 
I think the simplest solution would be to basically call these functions closer to where the rpcbind calls happen today, and just don't do them when the svc_program has vs_hidden set or if the xprt is being created with SVC_SOCK_ANONYMOUS set. In fact, a rethink of the vs_hidden/SVC_SOCK_ANONYMOUS scheme would probably be a good preliminary patch here. There's a lot of overlap between those two flags, and somehow consolidating them would probably be a good thing.
13.09.2011 16:51, Jeff Layton пишет: > My assumption in reading this set (maybe wrong) was that this was a > preliminary set for now that just plops in function calls in the places > that do this sort of thing now. I figured that eventually he'd convert > rpcb_create_local, et. al. to do the same thing but within the correct > namespace for the calling task. > You have a right assumption. This is exactly what I'm going to do next. > > I think the simplest solution would be to basically call these > functions closer to where the rpcbind calls happen today, and just > don't do them when the svc_program has vs_hidden set or if the xprt is > being created with SVC_SOCK_ANONYMOUS set. > This solution is not the simplest one since we call svc_register() for every svc socket if it's not anonymous. But svc_unregister() is called only once for all inet families and protocols. Also I've noticed that we call svc_unregister in __svc_create(). I.e. we call it for nfs callbacks as well (even though we don't need this). Thus, for now, nfs callbacks service creation depends on the presence of rpcbind clients. So, from my pov, we need something like a startup() callback, passed to svc_create(_pooled)() to clean up this mess. This callback will be defined only for lockd and nfsd and will create rpcbind clients and remove any stale portmap registrations.
On Tue, 13 Sep 2011 17:39:59 +0400 Stanislav Kinsbursky <skinsbursky@parallels.com> wrote: > 13.09.2011 16:51, Jeff Layton пишет: > > My assumption in reading this set (maybe wrong) was that this was a > > preliminary set for now that just plops in function calls in the places > > that do this sort of thing now. I figured that eventually he'd convert > > rpcb_create_local, et. al. to do the same thing but within the correct > > namespace for the calling task. > > > > You have a right assumption. This is exactly what I'm going to do next. > > > > > I think the simplest solution would be to basically call these > > functions closer to where the rpcbind calls happen today, and just > > don't do them when the svc_program has vs_hidden set or if the xprt is > > being created with SVC_SOCK_ANONYMOUS set. > > > > This solution is not the simplest one since we call svc_register() for every svc > socket if it's not anonymous. But svc_unregister() is called only once for all > inet families and protocols. > Ahh ok, good point. > Also I've noticed, that we call svc_unregister in __svc_create(). I.e. we call > it for nfs callbacks as well (in spite of that we don't need this). Thus, for > now, nfs callbacks service creation depends on rpcbind clients presence. > Yeah, that's just to remove the any existing registration before we set up the new one. In the case of a "hidden" service that can probably be skipped if it makes things easier. > So, for my pow, we need something like startup() callback, passed to > svc_create(_pooled)() to clean up this mess. > This callback will be defined only for lockd and nfsd and will create rpcbind > clients and remove any stale portmap registrations. > That sounds like a reasonable scheme. I'll wait to see the patches.
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index db7bcaf..65a8115 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -135,10 +135,12 @@ void rpc_shutdown_client(struct rpc_clnt *); void rpc_release_client(struct rpc_clnt *); void rpc_task_release_client(struct rpc_task *); +int rpcb_create_local(void); int rpcb_register(u32, u32, int, unsigned short); int rpcb_v4_register(const u32 program, const u32 version, const struct sockaddr *address, const char *netid); +void rpcb_put_local(void); void rpcb_getport_async(struct rpc_task *); void rpc_call_start(struct rpc_task *); diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index b4cc0f1..437ec60 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -318,7 +318,7 @@ out: * Returns zero on success, otherwise a negative errno value * is returned. */ -static int rpcb_create_local(void) +int rpcb_create_local(void) { static DEFINE_MUTEX(rpcb_create_local_mutex); int result = 0; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 6a69a11..9095c0e 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -367,8 +367,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, unsigned int xdrsize; unsigned int i; - if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) + if (rpcb_create_local() < 0) return NULL; + + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) + goto out_err; serv->sv_name = prog->pg_name; serv->sv_program = prog; serv->sv_nrthreads = 1; @@ -403,7 +406,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, GFP_KERNEL); if (!serv->sv_pools) { kfree(serv); - return NULL; + goto out_err; } for (i = 0; i < serv->sv_nrpools; i++) { @@ -423,6 +426,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, svc_unregister(serv); return serv; + +out_err: + rpcb_put_local(); + return NULL; } struct svc_serv * @@ -491,6 +498,8 @@ svc_destroy(struct svc_serv *serv) 
svc_unregister(serv); kfree(serv->sv_pools); kfree(serv); + + rpcb_put_local(); } EXPORT_SYMBOL_GPL(svc_destroy);
Create rpcbind clients or increase the rpcbind users counter during RPC service creation and decrease this counter (and possibly destroy those clients) on RPC service destruction. Signed-off-by: Stanislav Kinsbursky <skinsbursky@parallels.com> --- include/linux/sunrpc/clnt.h | 2 ++ net/sunrpc/rpcb_clnt.c | 2 +- net/sunrpc/svc.c | 13 +++++++++++-- 3 files changed, 14 insertions(+), 3 deletions(-) -- To unsubscribe from this list: send the line "unsubscribe netdev" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html