perf_event, powerpc: Fix compilation after big perf_counter rename

Message ID 19128.4280.813369.589704@cargo.ozlabs.ibm.com (mailing list archive)
State Accepted, archived

Commit Message

Paul Mackerras Sept. 21, 2009, 11:48 p.m. UTC
This fixes two places in the powerpc perf_event (perf_counter) code
where 'list_entry' needs to be changed to 'group_entry' but which were
missed in commit 65abc865 ("perf_counter: Rename list_entry ->
group_entry, counter_list -> group_list").

This also changes 'event' back to 'counter' in a couple of contexts:

* Field and function names that deal with the limited-function
  counters: it's really the hardware counters whose function is
  limited, not the events that they count.  Hence:

  MAX_LIMITED_HWEVENTS -> MAX_LIMITED_HWCOUNTERS
  limited_event -> limited_counter
  freeze/thaw_limited_events -> freeze/thaw_limited_counters

* The machine-specific PMU description struct (struct power_pmu): this
  renames 'n_event' back to 'n_counter' since it really describes how
  many hardware counters the machine has.  (Renaming this back avoids
  a compile error in each of the machine-specific PMU back-ends where
  they initialize their power_pmu struct.)

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/include/asm/perf_event.h |    4 +--
 arch/powerpc/kernel/perf_event.c      |   38 +++++++++++++++++-----------------
 2 files changed, 21 insertions(+), 21 deletions(-)
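
For context: list_for_each_entry() takes the name of the embedded list_head
member as its third argument, so the stale 'list_entry' member name stops
compiling once that field of struct perf_event is renamed.  A simplified
sketch (illustrative only, not the kernel's full definitions):

	struct perf_event {
		struct list_head group_entry;	/* was 'list_entry' before the rename */
		struct list_head sibling_list;	/* the group leader's list of siblings */
		/* ... */
	};

	/* Iterate a group leader's siblings; the third argument must name
	 * the list_head member inside struct perf_event exactly. */
	struct perf_event *sub;
	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
		/* ... */
	}

Likewise, the machine-specific back-ends fill in struct power_pmu with
designated initializers, which name the fields directly, so they stop
compiling if the header calls the field 'n_event' while the back-ends still
say '.n_counter'.  An illustrative initializer (example values, not the
exact back-end source):

	static struct power_pmu power5_pmu = {
		.name		= "POWER5",
		.n_counter	= 6,	/* number of hardware counters on this PMU */
		/* ... */
	};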

Comments

Benjamin Herrenschmidt Sept. 22, 2009, 1:56 a.m. UTC | #1
On Tue, 2009-09-22 at 09:48 +1000, Paul Mackerras wrote:
> This fixes two places in the powerpc perf_event (perf_counter) code
> where 'list_entry' needs to be changed to 'group_entry', but were
> missed in commit 65abc865 ("perf_counter: Rename list_entry ->
> group_entry, counter_list -> group_list").

Ingo: This is becoming a recurring one now... the powerpc build upstream is
broken approximately every day by some new perfctr build breakage.

You really aren't build-testing architectures other than x86, right?

Ben.

> This also changes 'event' back to 'counter' in a couple of contexts:
> 
> * Field and function names that deal with the limited-function
>   counters: it's really the hardware counters whose function is
>   limited, not the events that they count.  Hence:
> 
>   MAX_LIMITED_HWEVENTS -> MAX_LIMITED_HWCOUNTERS
>   limited_event -> limited_counter
>   freeze/thaw_limited_events -> freeze/thaw_limited_counters
> 
> * The machine-specific PMU description struct (struct power_pmu): this
>   renames 'n_event' back to 'n_counter' since it really describes how
>   many hardware counters the machine has.  (Renaming this back avoids
>   a compile error in each of the machine-specific PMU back-ends where
>   they initialize their power_pmu struct.)
> 
> Signed-off-by: Paul Mackerras <paulus@samba.org>
> ---
>  arch/powerpc/include/asm/perf_event.h |    4 +--
>  arch/powerpc/kernel/perf_event.c      |   38 +++++++++++++++++-----------------
>  2 files changed, 21 insertions(+), 21 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
> index 2499aaa..3288ce3 100644
> --- a/arch/powerpc/include/asm/perf_event.h
> +++ b/arch/powerpc/include/asm/perf_event.h
> @@ -14,7 +14,7 @@
>  
>  #define MAX_HWEVENTS		8
>  #define MAX_EVENT_ALTERNATIVES	8
> -#define MAX_LIMITED_HWEVENTS	2
> +#define MAX_LIMITED_HWCOUNTERS	2
>  
>  /*
>   * This struct provides the constants and functions needed to
> @@ -22,7 +22,7 @@
>   */
>  struct power_pmu {
>  	const char	*name;
> -	int		n_event;
> +	int		n_counter;
>  	int		max_alternatives;
>  	unsigned long	add_fields;
>  	unsigned long	test_adder;
> diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
> index 197b7d9..bbcbae1 100644
> --- a/arch/powerpc/kernel/perf_event.c
> +++ b/arch/powerpc/kernel/perf_event.c
> @@ -30,8 +30,8 @@ struct cpu_hw_events {
>  	u64 events[MAX_HWEVENTS];
>  	unsigned int flags[MAX_HWEVENTS];
>  	unsigned long mmcr[3];
> -	struct perf_event *limited_event[MAX_LIMITED_HWEVENTS];
> -	u8  limited_hwidx[MAX_LIMITED_HWEVENTS];
> +	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
> +	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
>  	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
>  	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
>  	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
> @@ -253,7 +253,7 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw,
>  	unsigned long addf = ppmu->add_fields;
>  	unsigned long tadd = ppmu->test_adder;
>  
> -	if (n_ev > ppmu->n_event)
> +	if (n_ev > ppmu->n_counter)
>  		return -1;
>  
>  	/* First see if the events will go on as-is */
> @@ -426,7 +426,7 @@ static int is_limited_pmc(int pmcnum)
>  		&& (pmcnum == 5 || pmcnum == 6);
>  }
>  
> -static void freeze_limited_events(struct cpu_hw_events *cpuhw,
> +static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
>  				    unsigned long pmc5, unsigned long pmc6)
>  {
>  	struct perf_event *event;
> @@ -434,7 +434,7 @@ static void freeze_limited_events(struct cpu_hw_events *cpuhw,
>  	int i;
>  
>  	for (i = 0; i < cpuhw->n_limited; ++i) {
> -		event = cpuhw->limited_event[i];
> +		event = cpuhw->limited_counter[i];
>  		if (!event->hw.idx)
>  			continue;
>  		val = (event->hw.idx == 5) ? pmc5 : pmc6;
> @@ -445,7 +445,7 @@ static void freeze_limited_events(struct cpu_hw_events *cpuhw,
>  	}
>  }
>  
> -static void thaw_limited_events(struct cpu_hw_events *cpuhw,
> +static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
>  				  unsigned long pmc5, unsigned long pmc6)
>  {
>  	struct perf_event *event;
> @@ -453,7 +453,7 @@ static void thaw_limited_events(struct cpu_hw_events *cpuhw,
>  	int i;
>  
>  	for (i = 0; i < cpuhw->n_limited; ++i) {
> -		event = cpuhw->limited_event[i];
> +		event = cpuhw->limited_counter[i];
>  		event->hw.idx = cpuhw->limited_hwidx[i];
>  		val = (event->hw.idx == 5) ? pmc5 : pmc6;
>  		atomic64_set(&event->hw.prev_count, val);
> @@ -495,9 +495,9 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
>  		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));
>  
>  	if (mmcr0 & MMCR0_FC)
> -		freeze_limited_events(cpuhw, pmc5, pmc6);
> +		freeze_limited_counters(cpuhw, pmc5, pmc6);
>  	else
> -		thaw_limited_events(cpuhw, pmc5, pmc6);
> +		thaw_limited_counters(cpuhw, pmc5, pmc6);
>  
>  	/*
>  	 * Write the full MMCR0 including the event overflow interrupt
> @@ -653,7 +653,7 @@ void hw_perf_enable(void)
>  			continue;
>  		idx = hwc_index[i] + 1;
>  		if (is_limited_pmc(idx)) {
> -			cpuhw->limited_event[n_lim] = event;
> +			cpuhw->limited_counter[n_lim] = event;
>  			cpuhw->limited_hwidx[n_lim] = idx;
>  			++n_lim;
>  			continue;
> @@ -702,7 +702,7 @@ static int collect_events(struct perf_event *group, int max_count,
>  		flags[n] = group->hw.event_base;
>  		events[n++] = group->hw.config;
>  	}
> -	list_for_each_entry(event, &group->sibling_list, list_entry) {
> +	list_for_each_entry(event, &group->sibling_list, group_entry) {
>  		if (!is_software_event(event) &&
>  		    event->state != PERF_EVENT_STATE_OFF) {
>  			if (n >= max_count)
> @@ -742,7 +742,7 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
>  		return 0;
>  	cpuhw = &__get_cpu_var(cpu_hw_events);
>  	n0 = cpuhw->n_events;
> -	n = collect_events(group_leader, ppmu->n_event - n0,
> +	n = collect_events(group_leader, ppmu->n_counter - n0,
>  			   &cpuhw->event[n0], &cpuhw->events[n0],
>  			   &cpuhw->flags[n0]);
>  	if (n < 0)
> @@ -764,7 +764,7 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
>  	cpuctx->active_oncpu += n;
>  	n = 1;
>  	event_sched_in(group_leader, cpu);
> -	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
> +	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
>  		if (sub->state != PERF_EVENT_STATE_OFF) {
>  			event_sched_in(sub, cpu);
>  			++n;
> @@ -797,7 +797,7 @@ static int power_pmu_enable(struct perf_event *event)
>  	 */
>  	cpuhw = &__get_cpu_var(cpu_hw_events);
>  	n0 = cpuhw->n_events;
> -	if (n0 >= ppmu->n_event)
> +	if (n0 >= ppmu->n_counter)
>  		goto out;
>  	cpuhw->event[n0] = event;
>  	cpuhw->events[n0] = event->hw.config;
> @@ -848,11 +848,11 @@ static void power_pmu_disable(struct perf_event *event)
>  		}
>  	}
>  	for (i = 0; i < cpuhw->n_limited; ++i)
> -		if (event == cpuhw->limited_event[i])
> +		if (event == cpuhw->limited_counter[i])
>  			break;
>  	if (i < cpuhw->n_limited) {
>  		while (++i < cpuhw->n_limited) {
> -			cpuhw->limited_event[i-1] = cpuhw->limited_event[i];
> +			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
>  			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
>  		}
>  		--cpuhw->n_limited;
> @@ -1078,7 +1078,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
>  	 */
>  	n = 0;
>  	if (event->group_leader != event) {
> -		n = collect_events(event->group_leader, ppmu->n_event - 1,
> +		n = collect_events(event->group_leader, ppmu->n_counter - 1,
>  				   ctrs, events, cflags);
>  		if (n < 0)
>  			return ERR_PTR(-EINVAL);
> @@ -1230,7 +1230,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
>  	int nmi;
>  
>  	if (cpuhw->n_limited)
> -		freeze_limited_events(cpuhw, mfspr(SPRN_PMC5),
> +		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
>  					mfspr(SPRN_PMC6));
>  
>  	perf_read_regs(regs);
> @@ -1260,7 +1260,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
>  	 * Any that we processed in the previous loop will not be negative.
>  	 */
>  	if (!found) {
> -		for (i = 0; i < ppmu->n_event; ++i) {
> +		for (i = 0; i < ppmu->n_counter; ++i) {
>  			if (is_limited_pmc(i + 1))
>  				continue;
>  			val = read_pmc(i + 1);
> _______________________________________________
> Linuxppc-dev mailing list
> Linuxppc-dev@lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/linuxppc-dev
Ingo Molnar Sept. 22, 2009, 7:28 a.m. UTC | #2
* Benjamin Herrenschmidt <benh@kernel.crashing.org> wrote:

> On Tue, 2009-09-22 at 09:48 +1000, Paul Mackerras wrote:
>
> > This fixes two places in the powerpc perf_event (perf_counter) code 
> > where 'list_entry' needs to be changed to 'group_entry', but were 
> > missed in commit 65abc865 ("perf_counter: Rename list_entry -> 
> > group_entry, counter_list -> group_list").

Oops, indeed - queued up the fix and will send it to Linus shortly - 
thanks!

> Ingo: This is becoming a recurring one now... powerpc build upstream 
> is broken approx everyday by some new perfctr build breakage.
>
> You really aren't build testing other architectures than x86 right ?

On the contrary - i am build-testing every architecture on a daily basis
(and sometimes multiple times a day - yesterday i did 5 cross builds during
the rename). In fact i am testing more architectures than linux-next does.

Here's the log of the test i ran yesterday before i sent those bits to 
Linus:

testing 24 architectures.
                                 (warns)               (warns)
testing      alpha:  -git:  pass (   24),  -tip:  pass (   24)
testing        arm:  -git:  fail (   11),  -tip:  fail (   13)
testing   blackfin:  -git:  pass (    3),  -tip:  pass (    3)
testing       cris:  -git:  fail (   34),  -tip:  pass (   20)
testing        frv:  -git:  fail (   13),  -tip:  fail (   13)
testing      h8300:  -git:  fail (  441),  -tip:  fail (  185)
testing       i386:  -git:  pass (    2),  -tip:  pass (    5)
testing       ia64:  -git:  fail (  172),  -tip:  pass (  160)
testing       m32r:  -git:  pass (   39),  -tip:  pass (   39)
testing       m68k:  -git:  pass (   42),  -tip:  pass (   42)
testing  m68knommu:  -git:  fail (   80),  -tip:  fail (   80)
testing microblaze:  -git:  fail (   14),  -tip:  fail (   14)
testing       mips:  -git:  pass (    6),  -tip:  pass (    6)
testing    mn10300:  -git:  fail (   10),  -tip:  fail (   10)
testing     parisc:  -git:  pass (   26),  -tip:  pass (   26)
testing    powerpc:  -git:  fail (   36),  -tip:  fail (   45)
testing       s390:  -git:  pass (    6),  -tip:  pass (    6)
testing      score:  -git:  fail (   13),  -tip:  fail (   13)
testing         sh:  -git:  fail (   22),  -tip:  fail (   19)
testing      sparc:  -git:  pass (    3),  -tip:  pass (    3)
testing         um:  -git:  pass (    3),  -tip:  pass (    3)
testing     xtensa:  -git:  fail (   46),  -tip:  fail (   46)
testing     x86-64:  -git:  pass (    0),  -tip:  pass (    0)
testing     x86-32:  -git:  pass (    0),  -tip:  pass (    0)

In fact there are architectures that don't build in Linus's tree but do
build in -tip:

testing       cris:  -git:  fail (   34),  -tip:  pass (   20)

Because not only do i test every architecture, i also try to fix upstream
bugs on non-x86 pro-actively. See for example this upstream fix:

 8d7ac69: Blackfin: Fix link errors with binutils 2.19 and GCC 4.3

Nevertheless you are right that i should have caught this particular 
PowerPC build bug - i missed it - sorry about that!

Thanks,

	Ingo
Benjamin Herrenschmidt Sept. 22, 2009, 8 a.m. UTC | #3
On Tue, 2009-09-22 at 09:28 +0200, Ingo Molnar wrote:
> 
> Nevertheless you are right that i should have caught this particular 
> PowerPC build bug - i missed it - sorry about that!
> 
Alright. Well, to help in general, we are setting up a build-bot here
too that will build -tip HEAD daily, for at least powerpc, with a few
configs.

Cheers,
Ben.
Ingo Molnar Sept. 22, 2009, 8:13 a.m. UTC | #4
* Benjamin Herrenschmidt <benh@kernel.crashing.org> wrote:

> On Tue, 2009-09-22 at 09:28 +0200, Ingo Molnar wrote:
> > 
> > Nevertheless you are right that i should have caught this particular 
> > PowerPC build bug - i missed it - sorry about that!
> 
> Allright. Well, to help in general, we are setting up a build-bot here 
> too that will build -tip HEAD for at least powerpc daily with a few 
> configs too.

Cool, that's really useful! It will be especially helpful during weekends;
in that timeframe linux-next-driven testing has a latency of 72-95 hours
and -tip usually has an uptick in patches.

Thanks,

	Ingo
Michael Ellerman Sept. 22, 2009, 11:52 a.m. UTC | #5
On Tue, 2009-09-22 at 18:00 +1000, Benjamin Herrenschmidt wrote:
> On Tue, 2009-09-22 at 09:28 +0200, Ingo Molnar wrote:
> > 
> > Nevertheless you are right that i should have caught this particular 
> > PowerPC build bug - i missed it - sorry about that!
> > 
> Allright. Well, to help in general, we are setting up a build-bot
> here too that will build -tip HEAD for at least powerpc daily with
> a few configs too.

Results here:

http://kisskb.ellerman.id.au/kisskb/branch/12/

cheers
Ingo Molnar Sept. 23, 2009, 12:44 p.m. UTC | #6
* Michael Ellerman <michael@ellerman.id.au> wrote:

> On Tue, 2009-09-22 at 18:00 +1000, Benjamin Herrenschmidt wrote:
> > On Tue, 2009-09-22 at 09:28 +0200, Ingo Molnar wrote:
> > > 
> > > Nevertheless you are right that i should have caught this particular 
> > > PowerPC build bug - i missed it - sorry about that!
> > > 
> > Allright. Well, to help in general, we are setting up a build-bot
> > here too that will build -tip HEAD for at least powerpc daily with
> > a few configs too.
> 
> Results here:
> 
> http://kisskb.ellerman.id.au/kisskb/branch/12/

ok, seems green for today - of the two failures, one appears to be a
powerpc toolchain problem and the other is a mainline warning.

Btw., for me to be able to notice failures there, it would have to email
me automatically if there are any -tip build failures that do not occur
with the upstream branch. Does it have such a feature?

	Ingo
Michael Ellerman Sept. 23, 2009, 11:19 p.m. UTC | #7
On Wed, 2009-09-23 at 14:44 +0200, Ingo Molnar wrote:
> * Michael Ellerman <michael@ellerman.id.au> wrote:
> 
> > On Tue, 2009-09-22 at 18:00 +1000, Benjamin Herrenschmidt wrote:
> > > On Tue, 2009-09-22 at 09:28 +0200, Ingo Molnar wrote:
> > > > 
> > > > Nevertheless you are right that i should have caught this particular 
> > > > PowerPC build bug - i missed it - sorry about that!
> > > > 
> > > Allright. Well, to help in general, we are setting up a build-bot
> > > here too that will build -tip HEAD for at least powerpc daily with
> > > a few configs too.
> > 
> > Results here:
> > 
> > http://kisskb.ellerman.id.au/kisskb/branch/12/
> 
> ok, seems green for today - the two failures are: one a powerpc 
> toolchain problem it appears, plus a mainline warning.

Yep, that looks more or less normal.

> Btw., for me to be able to notice failures there it would have to email 
> me automatically if there's any -tip build failures that do not occur 
> with the upstream branch. Does it have such a feature?

Not really, it sends mails to me, but it doesn't have a way to filter
them by branch. I think the plan is we'll keep an eye on it and either
send you patches or at least let you know that it's broken.

cheers
Ingo Molnar Sept. 24, 2009, 12:14 p.m. UTC | #8
* Michael Ellerman <michael@ellerman.id.au> wrote:

> On Wed, 2009-09-23 at 14:44 +0200, Ingo Molnar wrote:
> > * Michael Ellerman <michael@ellerman.id.au> wrote:
> > 
> > > On Tue, 2009-09-22 at 18:00 +1000, Benjamin Herrenschmidt wrote:
> > > > On Tue, 2009-09-22 at 09:28 +0200, Ingo Molnar wrote:
> > > > > 
> > > > > Nevertheless you are right that i should have caught this particular 
> > > > > PowerPC build bug - i missed it - sorry about that!
> > > > > 
> > > > Allright. Well, to help in general, we are setting up a build-bot
> > > > here too that will build -tip HEAD for at least powerpc daily with
> > > > a few configs too.
> > > 
> > > Results here:
> > > 
> > > http://kisskb.ellerman.id.au/kisskb/branch/12/
> > 
> > ok, seems green for today - the two failures are: one a powerpc 
> > toolchain problem it appears, plus a mainline warning.
> 
> Yep that looks more or less normal.
> 
> > Btw., for me to be able to notice failures there it would have to 
> > email me automatically if there's any -tip build failures that do 
> > not occur with the upstream branch. Does it have such a feature?
> 
> Not really, it sends mails to me, but it doesn't have a way to filter 
> them by branch. I think the plan is we'll keep an eye on it and either 
> send you patches or at least let you know that it's broken.

how many mails is that per day, typically? If there are not too many and
if there's a way to send all of them to me, i could post-filter them for
-tip relevance, if that is feasible. You bouncing it to me later is
certainly also a solution (but it lengthens the latency of fixes,
obviously).

	Ingo
Michael Ellerman Sept. 24, 2009, 1:25 p.m. UTC | #9
On Thu, 2009-09-24 at 14:14 +0200, Ingo Molnar wrote:
> * Michael Ellerman <michael@ellerman.id.au> wrote:
> 
> > On Wed, 2009-09-23 at 14:44 +0200, Ingo Molnar wrote:
> > > * Michael Ellerman <michael@ellerman.id.au> wrote:
> > > 
> > > > On Tue, 2009-09-22 at 18:00 +1000, Benjamin Herrenschmidt wrote:
> > > > > On Tue, 2009-09-22 at 09:28 +0200, Ingo Molnar wrote:
> > > > > > 
> > > > > > Nevertheless you are right that i should have caught this particular 
> > > > > > PowerPC build bug - i missed it - sorry about that!
> > > > > > 
> > > > > Allright. Well, to help in general, we are setting up a build-bot
> > > > > here too that will build -tip HEAD for at least powerpc daily with
> > > > > a few configs too.
> > > > 
> > > > Results here:
> > > > 
> > > > http://kisskb.ellerman.id.au/kisskb/branch/12/
> > > 
> > > ok, seems green for today - the two failures are: one a powerpc 
> > > toolchain problem it appears, plus a mainline warning.
> > 
> > Yep that looks more or less normal.
> > 
> > > Btw., for me to be able to notice failures there it would have to 
> > > email me automatically if there's any -tip build failures that do 
> > > not occur with the upstream branch. Does it have such a feature?
> > 
> > Not really, it sends mails to me, but it doesn't have a way to filter 
> > them by branch. I think the plan is we'll keep an eye on it and either 
> > send you patches or at least let you know that it's broken.
> 
> how many mails are those per day, typically? If there's not too many and 
> if there's a way to send all of them to me i could post-filter them for 
> -tip relevance. If that is feasible. You bouncing it to me later is 
> certainly also a solution. (but lengthens the latency of fixes, 
> obviously.)

Lots of mails - there are an insane number of arm configs for
starters :)

Give me a day or two; I should be able to add a per-branch setting for
who to send mails to without too much trouble.

cheers
Stephen Rothwell Sept. 24, 2009, 11:58 p.m. UTC | #10
Hi Ingo,

On Thu, 24 Sep 2009 23:25:55 +1000 Michael Ellerman <michael@ellerman.id.au> wrote:
>
> Give me a day or two, I should be able to add a per-branch setting for
> who to send mails to without too much trouble.

In the meantime, I don't know if someone has pointed you at these today:

http://kisskb.ellerman.id.au/kisskb/branch/12/
Ingo Molnar Oct. 1, 2009, 7:42 a.m. UTC | #11
* Stephen Rothwell <sfr@canb.auug.org.au> wrote:

> Hi Ingo,
> 
> On Thu, 24 Sep 2009 23:25:55 +1000 Michael Ellerman <michael@ellerman.id.au> wrote:
> >
> > Give me a day or two, I should be able to add a per-branch setting for
> > who to send mails to without too much trouble.
> 
> In the mean time I don't now if someone has pointed you at these today:
> 
> http://kisskb.ellerman.id.au/kisskb/branch/12/

That's an upstream warning.

-tip supports a fail-on-build-warnings mode (for the whole kernel) via
the CONFIG_ALLOW_WARNINGS .config setting. So if you do allnoconfig
builds, make sure you turn on CONFIG_ALLOW_WARNINGS=y to get the same
build behavior as with Linus's tree.

	Ingo
Stephen Rothwell Oct. 1, 2009, 11:13 a.m. UTC | #12
Hi Ingo,

On Thu, 1 Oct 2009 09:42:01 +0200 Ingo Molnar <mingo@elte.hu> wrote:
>
> * Stephen Rothwell <sfr@canb.auug.org.au> wrote:
> 
> > On Thu, 24 Sep 2009 23:25:55 +1000 Michael Ellerman <michael@ellerman.id.au> wrote:
> > >
> > > Give me a day or two, I should be able to add a per-branch setting for
> > > who to send mails to without too much trouble.
> > 
> > In the mean time I don't now if someone has pointed you at these today:
> > 
> > http://kisskb.ellerman.id.au/kisskb/branch/12/
> 
> That's an upstream warning.

When I sent that to you, I was referring to these build results:

http://kisskb.ellerman.id.au/kisskb/buildresult/1307802/

which (as you say) was only a warning in the rest of the kernel but was
turned into an error by the -Werror we use when building arch/powerpc.
The warning came from commit 4765c1db84c73f775eb1822a009117cbae524e9e
("rcu-tiny: The Bloatwatch Edition, v6") in the -tip tree and was fixed by
commit 5ef9b8e59c043624fb44e31439cecf7f8b4cd62a ("rcu: Clean up the
warning message about RCU not defined") the next day (in the -tip tree).
So no big problem. Neither of these commits is upstream.

I was just filling in the role of notifying you of build problems until
Michael can automate it.

Patch

diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 2499aaa..3288ce3 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -14,7 +14,7 @@ 
 
 #define MAX_HWEVENTS		8
 #define MAX_EVENT_ALTERNATIVES	8
-#define MAX_LIMITED_HWEVENTS	2
+#define MAX_LIMITED_HWCOUNTERS	2
 
 /*
  * This struct provides the constants and functions needed to
@@ -22,7 +22,7 @@ 
  */
 struct power_pmu {
 	const char	*name;
-	int		n_event;
+	int		n_counter;
 	int		max_alternatives;
 	unsigned long	add_fields;
 	unsigned long	test_adder;
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 197b7d9..bbcbae1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -30,8 +30,8 @@  struct cpu_hw_events {
 	u64 events[MAX_HWEVENTS];
 	unsigned int flags[MAX_HWEVENTS];
 	unsigned long mmcr[3];
-	struct perf_event *limited_event[MAX_LIMITED_HWEVENTS];
-	u8  limited_hwidx[MAX_LIMITED_HWEVENTS];
+	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
+	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
 	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
 	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
 	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
@@ -253,7 +253,7 @@  static int power_check_constraints(struct cpu_hw_events *cpuhw,
 	unsigned long addf = ppmu->add_fields;
 	unsigned long tadd = ppmu->test_adder;
 
-	if (n_ev > ppmu->n_event)
+	if (n_ev > ppmu->n_counter)
 		return -1;
 
 	/* First see if the events will go on as-is */
@@ -426,7 +426,7 @@  static int is_limited_pmc(int pmcnum)
 		&& (pmcnum == 5 || pmcnum == 6);
 }
 
-static void freeze_limited_events(struct cpu_hw_events *cpuhw,
+static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 				    unsigned long pmc5, unsigned long pmc6)
 {
 	struct perf_event *event;
@@ -434,7 +434,7 @@  static void freeze_limited_events(struct cpu_hw_events *cpuhw,
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		event = cpuhw->limited_event[i];
+		event = cpuhw->limited_counter[i];
 		if (!event->hw.idx)
 			continue;
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
@@ -445,7 +445,7 @@  static void freeze_limited_events(struct cpu_hw_events *cpuhw,
 	}
 }
 
-static void thaw_limited_events(struct cpu_hw_events *cpuhw,
+static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 				  unsigned long pmc5, unsigned long pmc6)
 {
 	struct perf_event *event;
@@ -453,7 +453,7 @@  static void thaw_limited_events(struct cpu_hw_events *cpuhw,
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		event = cpuhw->limited_event[i];
+		event = cpuhw->limited_counter[i];
 		event->hw.idx = cpuhw->limited_hwidx[i];
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
 		atomic64_set(&event->hw.prev_count, val);
@@ -495,9 +495,9 @@  static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));
 
 	if (mmcr0 & MMCR0_FC)
-		freeze_limited_events(cpuhw, pmc5, pmc6);
+		freeze_limited_counters(cpuhw, pmc5, pmc6);
 	else
-		thaw_limited_events(cpuhw, pmc5, pmc6);
+		thaw_limited_counters(cpuhw, pmc5, pmc6);
 
 	/*
 	 * Write the full MMCR0 including the event overflow interrupt
@@ -653,7 +653,7 @@  void hw_perf_enable(void)
 			continue;
 		idx = hwc_index[i] + 1;
 		if (is_limited_pmc(idx)) {
-			cpuhw->limited_event[n_lim] = event;
+			cpuhw->limited_counter[n_lim] = event;
 			cpuhw->limited_hwidx[n_lim] = idx;
 			++n_lim;
 			continue;
@@ -702,7 +702,7 @@  static int collect_events(struct perf_event *group, int max_count,
 		flags[n] = group->hw.event_base;
 		events[n++] = group->hw.config;
 	}
-	list_for_each_entry(event, &group->sibling_list, list_entry) {
+	list_for_each_entry(event, &group->sibling_list, group_entry) {
 		if (!is_software_event(event) &&
 		    event->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
@@ -742,7 +742,7 @@  int hw_perf_group_sched_in(struct perf_event *group_leader,
 		return 0;
 	cpuhw = &__get_cpu_var(cpu_hw_events);
 	n0 = cpuhw->n_events;
-	n = collect_events(group_leader, ppmu->n_event - n0,
+	n = collect_events(group_leader, ppmu->n_counter - n0,
 			   &cpuhw->event[n0], &cpuhw->events[n0],
 			   &cpuhw->flags[n0]);
 	if (n < 0)
@@ -764,7 +764,7 @@  int hw_perf_group_sched_in(struct perf_event *group_leader,
 	cpuctx->active_oncpu += n;
 	n = 1;
 	event_sched_in(group_leader, cpu);
-	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
+	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
 		if (sub->state != PERF_EVENT_STATE_OFF) {
 			event_sched_in(sub, cpu);
 			++n;
@@ -797,7 +797,7 @@  static int power_pmu_enable(struct perf_event *event)
 	 */
 	cpuhw = &__get_cpu_var(cpu_hw_events);
 	n0 = cpuhw->n_events;
-	if (n0 >= ppmu->n_event)
+	if (n0 >= ppmu->n_counter)
 		goto out;
 	cpuhw->event[n0] = event;
 	cpuhw->events[n0] = event->hw.config;
@@ -848,11 +848,11 @@  static void power_pmu_disable(struct perf_event *event)
 		}
 	}
 	for (i = 0; i < cpuhw->n_limited; ++i)
-		if (event == cpuhw->limited_event[i])
+		if (event == cpuhw->limited_counter[i])
 			break;
 	if (i < cpuhw->n_limited) {
 		while (++i < cpuhw->n_limited) {
-			cpuhw->limited_event[i-1] = cpuhw->limited_event[i];
+			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
 			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
 		}
 		--cpuhw->n_limited;
@@ -1078,7 +1078,7 @@  const struct pmu *hw_perf_event_init(struct perf_event *event)
 	 */
 	n = 0;
 	if (event->group_leader != event) {
-		n = collect_events(event->group_leader, ppmu->n_event - 1,
+		n = collect_events(event->group_leader, ppmu->n_counter - 1,
 				   ctrs, events, cflags);
 		if (n < 0)
 			return ERR_PTR(-EINVAL);
@@ -1230,7 +1230,7 @@  static void perf_event_interrupt(struct pt_regs *regs)
 	int nmi;
 
 	if (cpuhw->n_limited)
-		freeze_limited_events(cpuhw, mfspr(SPRN_PMC5),
+		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
 					mfspr(SPRN_PMC6));
 
 	perf_read_regs(regs);
@@ -1260,7 +1260,7 @@  static void perf_event_interrupt(struct pt_regs *regs)
 	 * Any that we processed in the previous loop will not be negative.
 	 */
 	if (!found) {
-		for (i = 0; i < ppmu->n_event; ++i) {
+		for (i = 0; i < ppmu->n_counter; ++i) {
 			if (is_limited_pmc(i + 1))
 				continue;
 			val = read_pmc(i + 1);