Message ID | 1465007915-7467-1-git-send-email-tim.gardner@canonical.com |
---|---
State | New |
Headers | show |
Clean cherry-pick, already in 4.6. Were you able to crashdump a Xenial hyper-v guest without this patch? It worked for me last time I tried a few weeks ago. --chris On Fri, Jun 03, 2016 at 08:38:35PM -0600, Tim Gardner wrote: > From: Vitaly Kuznetsov <vkuznets@redhat.com> > > BugLink: http://bugs.launchpad.net/bugs/1588965 > > Hyper-V vmbus module registers TSC page clocksource when loaded. This is > the clocksource with the highest rating and thus it becomes the watchdog > making unloading of the vmbus module impossible. > Separate clocksource_select_watchdog() from clocksource_enqueue_watchdog() > and use it on clocksource register/rating change/unregister. > > After all, lobotomized monkeys may need some love too. > > Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com> > Cc: John Stultz <john.stultz@linaro.org> > Cc: Dexuan Cui <decui@microsoft.com> > Cc: K. Y. Srinivasan <kys@microsoft.com> > Link: http://lkml.kernel.org/r/1453483913-25672-1-git-send-email-vkuznets@redhat.com > Signed-off-by: Thomas Gleixner <tglx@linutronix.de> > (cherry picked from commit bbf66d897adf2bb0c310db96c97e8db6369f39e1) > Signed-off-by: Tim Gardner <tim.gardner@canonical.com> > --- > kernel/time/clocksource.c | 52 ++++++++++++++++++++++++++++++++++++++--------- > 1 file changed, 42 insertions(+), 10 deletions(-) > > diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c > index 1347882..b98810d 100644 > --- a/kernel/time/clocksource.c > +++ b/kernel/time/clocksource.c > @@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) > /* cs is a watchdog. 
*/ > if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) > cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; > + } > + spin_unlock_irqrestore(&watchdog_lock, flags); > +} > + > +static void clocksource_select_watchdog(bool fallback) > +{ > + struct clocksource *cs, *old_wd; > + unsigned long flags; > + > + spin_lock_irqsave(&watchdog_lock, flags); > + /* save current watchdog */ > + old_wd = watchdog; > + if (fallback) > + watchdog = NULL; > + > + list_for_each_entry(cs, &clocksource_list, list) { > + /* cs is a clocksource to be watched. */ > + if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) > + continue; > + > + /* Skip current if we were requested for a fallback. */ > + if (fallback && cs == old_wd) > + continue; > + > /* Pick the best watchdog. */ > - if (!watchdog || cs->rating > watchdog->rating) { > + if (!watchdog || cs->rating > watchdog->rating) > watchdog = cs; > - /* Reset watchdog cycles */ > - clocksource_reset_watchdog(); > - } > } > + /* If we failed to find a fallback restore the old one. */ > + if (!watchdog) > + watchdog = old_wd; > + > + /* If we changed the watchdog we need to reset cycles. */ > + if (watchdog != old_wd) > + clocksource_reset_watchdog(); > + > /* Check if the watchdog timer needs to be started. 
*/ > clocksource_start_watchdog(); > spin_unlock_irqrestore(&watchdog_lock, flags); > @@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) > cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; > } > > +static void clocksource_select_watchdog(bool fallback) { } > static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } > static inline void clocksource_resume_watchdog(void) { } > static inline int __clocksource_watchdog_kthread(void) { return 0; } > @@ -736,6 +766,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) > clocksource_enqueue(cs); > clocksource_enqueue_watchdog(cs); > clocksource_select(); > + clocksource_select_watchdog(false); > mutex_unlock(&clocksource_mutex); > return 0; > } > @@ -758,6 +789,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating) > mutex_lock(&clocksource_mutex); > __clocksource_change_rating(cs, rating); > clocksource_select(); > + clocksource_select_watchdog(false); > mutex_unlock(&clocksource_mutex); > } > EXPORT_SYMBOL(clocksource_change_rating); > @@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating); > */ > static int clocksource_unbind(struct clocksource *cs) > { > - /* > - * I really can't convince myself to support this on hardware > - * designed by lobotomized monkeys. > - */ > - if (clocksource_is_watchdog(cs)) > - return -EBUSY; > + if (clocksource_is_watchdog(cs)) { > + /* Select and try to install a replacement watchdog. */ > + clocksource_select_watchdog(true); > + if (clocksource_is_watchdog(cs)) > + return -EBUSY; > + } > > if (cs == curr_clocksource) { > /* Select and try to install a replacement clock source */ > -- > 1.9.1 > > > -- > kernel-team mailing list > kernel-team@lists.ubuntu.com > https://lists.ubuntu.com/mailman/listinfo/kernel-team
Nope, I'm gonna let the MS dudes verify it since they have the HV setup (and I don't). rtg On 06/06/2016 07:14 AM, Chris J Arges wrote: > Clean cherry-pick, already in 4.6. > > Were you able to crashdump a Xenial hyper-v guest without this patch? It worked > for me last time I tried a few weeks ago. > > --chris > > On Fri, Jun 03, 2016 at 08:38:35PM -0600, Tim Gardner wrote: >> From: Vitaly Kuznetsov <vkuznets@redhat.com> >> >> BugLink: http://bugs.launchpad.net/bugs/1588965 >> >> Hyper-V vmbus module registers TSC page clocksource when loaded. This is >> the clocksource with the highest rating and thus it becomes the watchdog >> making unloading of the vmbus module impossible. >> Separate clocksource_select_watchdog() from clocksource_enqueue_watchdog() >> and use it on clocksource register/rating change/unregister. >> >> After all, lobotomized monkeys may need some love too. >> >> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com> >> Cc: John Stultz <john.stultz@linaro.org> >> Cc: Dexuan Cui <decui@microsoft.com> >> Cc: K. Y. Srinivasan <kys@microsoft.com> >> Link: http://lkml.kernel.org/r/1453483913-25672-1-git-send-email-vkuznets@redhat.com >> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> >> (cherry picked from commit bbf66d897adf2bb0c310db96c97e8db6369f39e1) >> Signed-off-by: Tim Gardner <tim.gardner@canonical.com> >> --- >> kernel/time/clocksource.c | 52 ++++++++++++++++++++++++++++++++++++++--------- >> 1 file changed, 42 insertions(+), 10 deletions(-) >> >> diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c >> index 1347882..b98810d 100644 >> --- a/kernel/time/clocksource.c >> +++ b/kernel/time/clocksource.c >> @@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) >> /* cs is a watchdog. 
*/ >> if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) >> cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; >> + } >> + spin_unlock_irqrestore(&watchdog_lock, flags); >> +} >> + >> +static void clocksource_select_watchdog(bool fallback) >> +{ >> + struct clocksource *cs, *old_wd; >> + unsigned long flags; >> + >> + spin_lock_irqsave(&watchdog_lock, flags); >> + /* save current watchdog */ >> + old_wd = watchdog; >> + if (fallback) >> + watchdog = NULL; >> + >> + list_for_each_entry(cs, &clocksource_list, list) { >> + /* cs is a clocksource to be watched. */ >> + if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) >> + continue; >> + >> + /* Skip current if we were requested for a fallback. */ >> + if (fallback && cs == old_wd) >> + continue; >> + >> /* Pick the best watchdog. */ >> - if (!watchdog || cs->rating > watchdog->rating) { >> + if (!watchdog || cs->rating > watchdog->rating) >> watchdog = cs; >> - /* Reset watchdog cycles */ >> - clocksource_reset_watchdog(); >> - } >> } >> + /* If we failed to find a fallback restore the old one. */ >> + if (!watchdog) >> + watchdog = old_wd; >> + >> + /* If we changed the watchdog we need to reset cycles. */ >> + if (watchdog != old_wd) >> + clocksource_reset_watchdog(); >> + >> /* Check if the watchdog timer needs to be started. 
*/ >> clocksource_start_watchdog(); >> spin_unlock_irqrestore(&watchdog_lock, flags); >> @@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) >> cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; >> } >> >> +static void clocksource_select_watchdog(bool fallback) { } >> static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } >> static inline void clocksource_resume_watchdog(void) { } >> static inline int __clocksource_watchdog_kthread(void) { return 0; } >> @@ -736,6 +766,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) >> clocksource_enqueue(cs); >> clocksource_enqueue_watchdog(cs); >> clocksource_select(); >> + clocksource_select_watchdog(false); >> mutex_unlock(&clocksource_mutex); >> return 0; >> } >> @@ -758,6 +789,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating) >> mutex_lock(&clocksource_mutex); >> __clocksource_change_rating(cs, rating); >> clocksource_select(); >> + clocksource_select_watchdog(false); >> mutex_unlock(&clocksource_mutex); >> } >> EXPORT_SYMBOL(clocksource_change_rating); >> @@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating); >> */ >> static int clocksource_unbind(struct clocksource *cs) >> { >> - /* >> - * I really can't convince myself to support this on hardware >> - * designed by lobotomized monkeys. >> - */ >> - if (clocksource_is_watchdog(cs)) >> - return -EBUSY; >> + if (clocksource_is_watchdog(cs)) { >> + /* Select and try to install a replacement watchdog. */ >> + clocksource_select_watchdog(true); >> + if (clocksource_is_watchdog(cs)) >> + return -EBUSY; >> + } >> >> if (cs == curr_clocksource) { >> /* Select and try to install a replacement clock source */ >> -- >> 1.9.1 >> >> >> -- >> kernel-team mailing list >> kernel-team@lists.ubuntu.com >> https://lists.ubuntu.com/mailman/listinfo/kernel-team
Applied to X. -Kamal
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 1347882..b98810d 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) /* cs is a watchdog. */ if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; + } + spin_unlock_irqrestore(&watchdog_lock, flags); +} + +static void clocksource_select_watchdog(bool fallback) +{ + struct clocksource *cs, *old_wd; + unsigned long flags; + + spin_lock_irqsave(&watchdog_lock, flags); + /* save current watchdog */ + old_wd = watchdog; + if (fallback) + watchdog = NULL; + + list_for_each_entry(cs, &clocksource_list, list) { + /* cs is a clocksource to be watched. */ + if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) + continue; + + /* Skip current if we were requested for a fallback. */ + if (fallback && cs == old_wd) + continue; + /* Pick the best watchdog. */ - if (!watchdog || cs->rating > watchdog->rating) { + if (!watchdog || cs->rating > watchdog->rating) watchdog = cs; - /* Reset watchdog cycles */ - clocksource_reset_watchdog(); - } } + /* If we failed to find a fallback restore the old one. */ + if (!watchdog) + watchdog = old_wd; + + /* If we changed the watchdog we need to reset cycles. */ + if (watchdog != old_wd) + clocksource_reset_watchdog(); + /* Check if the watchdog timer needs to be started. 
*/ clocksource_start_watchdog(); spin_unlock_irqrestore(&watchdog_lock, flags); @@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; } +static void clocksource_select_watchdog(bool fallback) { } static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } static inline void clocksource_resume_watchdog(void) { } static inline int __clocksource_watchdog_kthread(void) { return 0; } @@ -736,6 +766,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) clocksource_enqueue(cs); clocksource_enqueue_watchdog(cs); clocksource_select(); + clocksource_select_watchdog(false); mutex_unlock(&clocksource_mutex); return 0; } @@ -758,6 +789,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating) mutex_lock(&clocksource_mutex); __clocksource_change_rating(cs, rating); clocksource_select(); + clocksource_select_watchdog(false); mutex_unlock(&clocksource_mutex); } EXPORT_SYMBOL(clocksource_change_rating); @@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating); */ static int clocksource_unbind(struct clocksource *cs) { - /* - * I really can't convince myself to support this on hardware - * designed by lobotomized monkeys. - */ - if (clocksource_is_watchdog(cs)) - return -EBUSY; + if (clocksource_is_watchdog(cs)) { + /* Select and try to install a replacement watchdog. */ + clocksource_select_watchdog(true); + if (clocksource_is_watchdog(cs)) + return -EBUSY; + } if (cs == curr_clocksource) { /* Select and try to install a replacement clock source */