Message ID | 4AAA6E10.6080308@codemonkey.ws |
---|---|
State | Superseded |
Headers | show |
Anthony Liguori <anthony@codemonkey.ws> wrote: > malc wrote: >> And generalizations are always true. Anyhow, i'm explicitly against the >> patch, so first obtain the express acknowledgment from the leaders, >> otherwise i'll revert it should it go in. >> > > I'm adding the following patch to Juan's series. The result is that > get_ticks_per_sec() should be optimized to a literal value. The > result being that uses of it are faster than they were before (not that it > should matter). > > I think the result of this patch is that the refactoring is an > undeniable improvement. > > Regards, > > Anthony Liguori Nice. Acked-by: Juan Quintela <quintela@redhat.com> > > commit 1c7aff17af0ca9e1803b952ce455f096c5da8847 > Author: Anthony Liguori <aliguori@us.ibm.com> > Date: Fri Sep 11 10:28:26 2009 -0500 > > Make get_ticks_per_sec() a static inline > > ticks_per_sec is a constant. There's no need to store it as a variable as it > never changes since our time is based on units. > > Convert get_ticks_per_sec() to a static inline and move the constant into > qemu-timer.h. Remove all references to QEMU_TIMER_BASE so that we consistently > use this interface. 
> > Signed-off-by: Anthony Liguori <aliguori@us.ibm.com> > > diff --git a/qemu-timer.h b/qemu-timer.h > index 00b166d..e44c334 100644 > --- a/qemu-timer.h > +++ b/qemu-timer.h > @@ -26,7 +26,10 @@ void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time); > int qemu_timer_pending(QEMUTimer *ts); > int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time); > > -int64_t get_ticks_per_sec(void); > +static inline int64_t get_ticks_per_sec(void) > +{ > + return 1000000000LL; > +} > > void qemu_get_timer(QEMUFile *f, QEMUTimer *ts); > void qemu_put_timer(QEMUFile *f, QEMUTimer *ts); > diff --git a/vl.c b/vl.c > index 6052b1c..c3c874d 100644 > --- a/vl.c > +++ b/vl.c > @@ -528,8 +528,6 @@ uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) > /***********************************************************/ > /* real time host monotonic timer */ > > -#define QEMU_TIMER_BASE 1000000000LL > - > #ifdef WIN32 > > static int64_t clock_freq; > @@ -550,7 +548,7 @@ static int64_t get_clock(void) > { > LARGE_INTEGER ti; > QueryPerformanceCounter(&ti); > - return muldiv64(ti.QuadPart, QEMU_TIMER_BASE, clock_freq); > + return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq); > } > > #else > @@ -758,7 +756,7 @@ static void rtc_stop_timer(struct qemu_alarm_timer *t); > fairly approximate, so ignore small variation. > When the guest is idle real and virtual time will be aligned in > the IO wait loop. 
*/ > -#define ICOUNT_WOBBLE (QEMU_TIMER_BASE / 10) > +#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10) > > static void icount_adjust(void) > { > @@ -800,7 +798,7 @@ static void icount_adjust_rt(void * opaque) > static void icount_adjust_vm(void * opaque) > { > qemu_mod_timer(icount_vm_timer, > - qemu_get_clock(vm_clock) + QEMU_TIMER_BASE / 10); > + qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10); > icount_adjust(); > } > > @@ -816,7 +814,7 @@ static void init_icount_adjust(void) > qemu_get_clock(rt_clock) + 1000); > icount_vm_timer = qemu_new_timer(vm_clock, icount_adjust_vm, NULL); > qemu_mod_timer(icount_vm_timer, > - qemu_get_clock(vm_clock) + QEMU_TIMER_BASE / 10); > + qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10); > } > > static struct qemu_alarm_timer alarm_timers[] = { > @@ -1036,15 +1034,10 @@ int64_t qemu_get_clock(QEMUClock *clock) > } > } > > -int64_t get_ticks_per_sec(void) > -{ > - return timers_state.ticks_per_sec; > -} > - > static void init_timers(void) > { > init_get_clock(); > - timers_state.ticks_per_sec = QEMU_TIMER_BASE; > + timers_state.ticks_per_sec = get_ticks_per_sec(); > rt_clock = qemu_new_clock(QEMU_TIMER_REALTIME); > vm_clock = qemu_new_clock(QEMU_TIMER_VIRTUAL); > }
Anthony Liguori wrote: > malc wrote: >> And generalizations are always true. Anyhow, i'm explicitly against the >> patch, so first obtain the express acknowledgment from the leaders, >> otherwise i'll revert it should it go in. >> > > I'm adding the following patch to Juan's series. The result is that > get_ticks_per_sec() should be optimized to a literal value. The result > being that uses of it are faster than they were before (not that it should > matter). Perfect! > > I think the result of this patch is that the refactoring is an > undeniable improvement. > > Regards, > > Anthony Liguori > > Jan
On Friday 11 September 2009, Anthony Liguori wrote: > malc wrote: > > And generalizations are always true. Anyhow, i'm explicitly against the > > patch, so first obtain the express acknowledgment from the leaders, > > otherwise i'll revert it should it go in. > > I'm adding the following patch to Juan's series. The result is that > get_ticks_per_sec() should be optimized to a literal value. The result > being that uses of it are faster than they were before (not that it should > matter). Having this as a function/variable is completely misleading. It is and always will be 1000000000. You'd be better off using QEMU_TIMER_BASE directly. Paul
commit 1c7aff17af0ca9e1803b952ce455f096c5da8847 Author: Anthony Liguori <aliguori@us.ibm.com> Date: Fri Sep 11 10:28:26 2009 -0500 Make get_ticks_per_sec() a static inline ticks_per_sec is a constant. There's no need to store it as a variable as it never changes since our time is based on units. Convert get_ticks_per_sec() to a static inline and move the constant into qemu-timer.h. Remove all references to QEMU_TIMER_BASE so that we consistently use this interface. Signed-off-by: Anthony Liguori <aliguori@us.ibm.com> diff --git a/qemu-timer.h b/qemu-timer.h index 00b166d..e44c334 100644 --- a/qemu-timer.h +++ b/qemu-timer.h @@ -26,7 +26,10 @@ void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time); int qemu_timer_pending(QEMUTimer *ts); int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time); -int64_t get_ticks_per_sec(void); +static inline int64_t get_ticks_per_sec(void) +{ + return 1000000000LL; +} void qemu_get_timer(QEMUFile *f, QEMUTimer *ts); void qemu_put_timer(QEMUFile *f, QEMUTimer *ts); diff --git a/vl.c b/vl.c index 6052b1c..c3c874d 100644 --- a/vl.c +++ b/vl.c @@ -528,8 +528,6 @@ uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) /***********************************************************/ /* real time host monotonic timer */ -#define QEMU_TIMER_BASE 1000000000LL - #ifdef WIN32 static int64_t clock_freq; @@ -550,7 +548,7 @@ static int64_t get_clock(void) { LARGE_INTEGER ti; QueryPerformanceCounter(&ti); - return muldiv64(ti.QuadPart, QEMU_TIMER_BASE, clock_freq); + return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq); } #else @@ -758,7 +756,7 @@ static void rtc_stop_timer(struct qemu_alarm_timer *t); fairly approximate, so ignore small variation. When the guest is idle real and virtual time will be aligned in the IO wait loop. 
*/ -#define ICOUNT_WOBBLE (QEMU_TIMER_BASE / 10) +#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10) static void icount_adjust(void) { @@ -800,7 +798,7 @@ static void icount_adjust_rt(void * opaque) static void icount_adjust_vm(void * opaque) { qemu_mod_timer(icount_vm_timer, - qemu_get_clock(vm_clock) + QEMU_TIMER_BASE / 10); + qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10); icount_adjust(); } @@ -816,7 +814,7 @@ static void init_icount_adjust(void) qemu_get_clock(rt_clock) + 1000); icount_vm_timer = qemu_new_timer(vm_clock, icount_adjust_vm, NULL); qemu_mod_timer(icount_vm_timer, - qemu_get_clock(vm_clock) + QEMU_TIMER_BASE / 10); + qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10); } static struct qemu_alarm_timer alarm_timers[] = { @@ -1036,15 +1034,10 @@ int64_t qemu_get_clock(QEMUClock *clock) } } -int64_t get_ticks_per_sec(void) -{ - return timers_state.ticks_per_sec; -} - static void init_timers(void) { init_get_clock(); - timers_state.ticks_per_sec = QEMU_TIMER_BASE; + timers_state.ticks_per_sec = get_ticks_per_sec(); rt_clock = qemu_new_clock(QEMU_TIMER_REALTIME); vm_clock = qemu_new_clock(QEMU_TIMER_VIRTUAL); }