| Field | Value |
|---|---|
| Message ID | 20130906032209.GF29710@iris.ozlabs.ibm.com |
| State | New, archived |
| Headers | show |
On 05.09.2013, at 22:22, Paul Mackerras wrote: > POWER7 and later IBM server processors have a register called the > Program Priority Register (PPR), which controls the priority of > each hardware CPU SMT thread, and affects how fast it runs compared > to other SMT threads. This priority can be controlled by writing to > the PPR or by use of a set of instructions of the form or rN,rN,rN > which are otherwise no-ops but have been defined to set the priority > to particular levels. > > This adds code to context switch the PPR when entering and exiting > guests and to make the PPR value accessible through the SET/GET_ONE_REG > interface. When entering the guest, we set the PPR as late as > possible, because if we are setting a low thread priority it will > make the code run slowly from that point on. Similarly, the > first-level interrupt handlers save the PPR value in the PACA very > early on, and set the thread priority to the medium level, so that > the interrupt handling code runs at a reasonable speed. > > Signed-off-by: Paul Mackerras <paulus@samba.org> Reviewed-by: Alexander Graf <agraf@suse.de> Alex -- To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Fri, 2013-09-06 at 13:22 +1000, Paul Mackerras wrote: > POWER7 and later IBM server processors have a register called the > Program Priority Register (PPR), which controls the priority of > each hardware CPU SMT thread, and affects how fast it runs compared > to other SMT threads. This priority can be controlled by writing to > the PPR or by use of a set of instructions of the form or rN,rN,rN > which are otherwise no-ops but have been defined to set the priority > to particular levels. > > This adds code to context switch the PPR when entering and exiting > guests and to make the PPR value accessible through the SET/GET_ONE_REG > interface. When entering the guest, we set the PPR as late as > possible, because if we are setting a low thread priority it will > make the code run slowly from that point on. Similarly, the > first-level interrupt handlers save the PPR value in the PACA very > early on, and set the thread priority to the medium level, so that > the interrupt handling code runs at a reasonable speed. > > Signed-off-by: Paul Mackerras <paulus@samba.org> Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Alex, can you take this via your tree ? Cheers, Ben. 
> --- > Documentation/virtual/kvm/api.txt | 1 + > arch/powerpc/include/asm/exception-64s.h | 8 ++++++++ > arch/powerpc/include/asm/kvm_book3s_asm.h | 1 + > arch/powerpc/include/asm/kvm_host.h | 1 + > arch/powerpc/include/uapi/asm/kvm.h | 1 + > arch/powerpc/kernel/asm-offsets.c | 2 ++ > arch/powerpc/kvm/book3s_hv.c | 6 ++++++ > arch/powerpc/kvm/book3s_hv_rmhandlers.S | 12 +++++++++++- > 8 files changed, 31 insertions(+), 1 deletion(-) > > diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt > index 1030ac9..34a32b6 100644 > --- a/Documentation/virtual/kvm/api.txt > +++ b/Documentation/virtual/kvm/api.txt > @@ -1836,6 +1836,7 @@ registers, find a list below: > PPC | KVM_REG_PPC_ACOP | 64 > PPC | KVM_REG_PPC_VRSAVE | 32 > PPC | KVM_REG_PPC_LPCR | 64 > + PPC | KVM_REG_PPC_PPR | 64 > PPC | KVM_REG_PPC_TM_GPR0 | 64 > ... > PPC | KVM_REG_PPC_TM_GPR31 | 64 > diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h > index 07ca627..b86c4db 100644 > --- a/arch/powerpc/include/asm/exception-64s.h > +++ b/arch/powerpc/include/asm/exception-64s.h > @@ -203,6 +203,10 @@ do_kvm_##n: \ > ld r10,area+EX_CFAR(r13); \ > std r10,HSTATE_CFAR(r13); \ > END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \ > + BEGIN_FTR_SECTION_NESTED(948) \ > + ld r10,area+EX_PPR(r13); \ > + std r10,HSTATE_PPR(r13); \ > + END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \ > ld r10,area+EX_R10(r13); \ > stw r9,HSTATE_SCRATCH1(r13); \ > ld r9,area+EX_R9(r13); \ > @@ -216,6 +220,10 @@ do_kvm_##n: \ > ld r10,area+EX_R10(r13); \ > beq 89f; \ > stw r9,HSTATE_SCRATCH1(r13); \ > + BEGIN_FTR_SECTION_NESTED(948) \ > + ld r9,area+EX_PPR(r13); \ > + std r9,HSTATE_PPR(r13); \ > + END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \ > ld r9,area+EX_R9(r13); \ > std r12,HSTATE_SCRATCH0(r13); \ > li r12,n; \ > diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h > index 
9039d3c..22f4606 100644 > --- a/arch/powerpc/include/asm/kvm_book3s_asm.h > +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h > @@ -101,6 +101,7 @@ struct kvmppc_host_state { > #endif > #ifdef CONFIG_PPC_BOOK3S_64 > u64 cfar; > + u64 ppr; > #endif > }; > > diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h > index 9741bf0..b0dcd18 100644 > --- a/arch/powerpc/include/asm/kvm_host.h > +++ b/arch/powerpc/include/asm/kvm_host.h > @@ -464,6 +464,7 @@ struct kvm_vcpu_arch { > u32 ctrl; > ulong dabr; > ulong cfar; > + ulong ppr; > #endif > u32 vrsave; /* also USPRG0 */ > u32 mmucr; > diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h > index e42127d..fab6bc1 100644 > --- a/arch/powerpc/include/uapi/asm/kvm.h > +++ b/arch/powerpc/include/uapi/asm/kvm.h > @@ -534,6 +534,7 @@ struct kvm_get_htab_header { > > #define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4) > #define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5) > +#define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6) > > /* Transactional Memory checkpointed state: > * This is all GPRs, all VSX regs and a subset of SPRs > diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c > index ccb42cd..5c6ea96 100644 > --- a/arch/powerpc/kernel/asm-offsets.c > +++ b/arch/powerpc/kernel/asm-offsets.c > @@ -516,6 +516,7 @@ int main(void) > DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); > DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid)); > DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); > + DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); > DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count)); > DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); > DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); > @@ -600,6 +601,7 @@ int main(void) > > #ifdef CONFIG_PPC_BOOK3S_64 > HSTATE_FIELD(HSTATE_CFAR, cfar); > + 
HSTATE_FIELD(HSTATE_PPR, ppr); > #endif /* CONFIG_PPC_BOOK3S_64 */ > > #else /* CONFIG_PPC_BOOK3S */ > diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c > index 9c878d7..eceff7e 100644 > --- a/arch/powerpc/kvm/book3s_hv.c > +++ b/arch/powerpc/kvm/book3s_hv.c > @@ -814,6 +814,9 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) > case KVM_REG_PPC_LPCR: > *val = get_reg_val(id, vcpu->kvm->arch.lpcr); > break; > + case KVM_REG_PPC_PPR: > + *val = get_reg_val(id, vcpu->arch.ppr); > + break; > default: > r = -EINVAL; > break; > @@ -921,6 +924,9 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) > case KVM_REG_PPC_LPCR: > kvmppc_set_lpcr(vcpu, set_reg_val(id, *val)); > break; > + case KVM_REG_PPC_PPR: > + vcpu->arch.ppr = set_reg_val(id, *val); > + break; > default: > r = -EINVAL; > break; > diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S > index 85f8dd0..88e7068 100644 > --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S > +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S > @@ -561,13 +561,15 @@ BEGIN_FTR_SECTION > ld r5, VCPU_CFAR(r4) > mtspr SPRN_CFAR, r5 > END_FTR_SECTION_IFSET(CPU_FTR_CFAR) > +BEGIN_FTR_SECTION > + ld r0, VCPU_PPR(r4) > +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) > > ld r5, VCPU_LR(r4) > lwz r6, VCPU_CR(r4) > mtlr r5 > mtcr r6 > > - ld r0, VCPU_GPR(R0)(r4) > ld r1, VCPU_GPR(R1)(r4) > ld r2, VCPU_GPR(R2)(r4) > ld r3, VCPU_GPR(R3)(r4) > @@ -581,6 +583,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) > ld r12, VCPU_GPR(R12)(r4) > ld r13, VCPU_GPR(R13)(r4) > > +BEGIN_FTR_SECTION > + mtspr SPRN_PPR, r0 > +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) > + ld r0, VCPU_GPR(R0)(r4) > ld r4, VCPU_GPR(R4)(r4) > > hrfid > @@ -631,6 +637,10 @@ BEGIN_FTR_SECTION > ld r3, HSTATE_CFAR(r13) > std r3, VCPU_CFAR(r9) > END_FTR_SECTION_IFSET(CPU_FTR_CFAR) > +BEGIN_FTR_SECTION > + ld r4, HSTATE_PPR(r13) > + std r4, VCPU_PPR(r9) > 
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) > > /* Restore R1/R2 so we can handle faults */ > ld r1, HSTATE_HOST_R1(r13) -- To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On 16.09.2013, at 22:29, Benjamin Herrenschmidt wrote: > On Fri, 2013-09-06 at 13:22 +1000, Paul Mackerras wrote: >> POWER7 and later IBM server processors have a register called the >> Program Priority Register (PPR), which controls the priority of >> each hardware CPU SMT thread, and affects how fast it runs compared >> to other SMT threads. This priority can be controlled by writing to >> the PPR or by use of a set of instructions of the form or rN,rN,rN >> which are otherwise no-ops but have been defined to set the priority >> to particular levels. >> >> This adds code to context switch the PPR when entering and exiting >> guests and to make the PPR value accessible through the SET/GET_ONE_REG >> interface. When entering the guest, we set the PPR as late as >> possible, because if we are setting a low thread priority it will >> make the code run slowly from that point on. Similarly, the >> first-level interrupt handlers save the PPR value in the PACA very >> early on, and set the thread priority to the medium level, so that >> the interrupt handling code runs at a reasonable speed. >> >> Signed-off-by: Paul Mackerras <paulus@samba.org> > > Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> > > Alex, can you take this via your tree ? Yes, on the next respin :). Or is this one urgent? Alex -- To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 1030ac9..34a32b6 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -1836,6 +1836,7 @@ registers, find a list below: PPC | KVM_REG_PPC_ACOP | 64 PPC | KVM_REG_PPC_VRSAVE | 32 PPC | KVM_REG_PPC_LPCR | 64 + PPC | KVM_REG_PPC_PPR | 64 PPC | KVM_REG_PPC_TM_GPR0 | 64 ... PPC | KVM_REG_PPC_TM_GPR31 | 64 diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 07ca627..b86c4db 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -203,6 +203,10 @@ do_kvm_##n: \ ld r10,area+EX_CFAR(r13); \ std r10,HSTATE_CFAR(r13); \ END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \ + BEGIN_FTR_SECTION_NESTED(948) \ + ld r10,area+EX_PPR(r13); \ + std r10,HSTATE_PPR(r13); \ + END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \ ld r10,area+EX_R10(r13); \ stw r9,HSTATE_SCRATCH1(r13); \ ld r9,area+EX_R9(r13); \ @@ -216,6 +220,10 @@ do_kvm_##n: \ ld r10,area+EX_R10(r13); \ beq 89f; \ stw r9,HSTATE_SCRATCH1(r13); \ + BEGIN_FTR_SECTION_NESTED(948) \ + ld r9,area+EX_PPR(r13); \ + std r9,HSTATE_PPR(r13); \ + END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \ ld r9,area+EX_R9(r13); \ std r12,HSTATE_SCRATCH0(r13); \ li r12,n; \ diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 9039d3c..22f4606 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -101,6 +101,7 @@ struct kvmppc_host_state { #endif #ifdef CONFIG_PPC_BOOK3S_64 u64 cfar; + u64 ppr; #endif }; diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 9741bf0..b0dcd18 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -464,6 +464,7 @@ struct kvm_vcpu_arch { u32 ctrl; ulong dabr; ulong cfar; + ulong ppr; #endif 
u32 vrsave; /* also USPRG0 */ u32 mmucr; diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index e42127d..fab6bc1 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -534,6 +534,7 @@ struct kvm_get_htab_header { #define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4) #define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5) +#define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index ccb42cd..5c6ea96 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -516,6 +516,7 @@ int main(void) DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid)); DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); + DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count)); DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); @@ -600,6 +601,7 @@ int main(void) #ifdef CONFIG_PPC_BOOK3S_64 HSTATE_FIELD(HSTATE_CFAR, cfar); + HSTATE_FIELD(HSTATE_PPR, ppr); #endif /* CONFIG_PPC_BOOK3S_64 */ #else /* CONFIG_PPC_BOOK3S */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 9c878d7..eceff7e 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -814,6 +814,9 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) case KVM_REG_PPC_LPCR: *val = get_reg_val(id, vcpu->kvm->arch.lpcr); break; + case KVM_REG_PPC_PPR: + *val = get_reg_val(id, vcpu->arch.ppr); + break; default: r = -EINVAL; break; @@ -921,6 +924,9 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) 
case KVM_REG_PPC_LPCR: kvmppc_set_lpcr(vcpu, set_reg_val(id, *val)); break; + case KVM_REG_PPC_PPR: + vcpu->arch.ppr = set_reg_val(id, *val); + break; default: r = -EINVAL; break; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 85f8dd0..88e7068 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -561,13 +561,15 @@ BEGIN_FTR_SECTION ld r5, VCPU_CFAR(r4) mtspr SPRN_CFAR, r5 END_FTR_SECTION_IFSET(CPU_FTR_CFAR) +BEGIN_FTR_SECTION + ld r0, VCPU_PPR(r4) +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r5, VCPU_LR(r4) lwz r6, VCPU_CR(r4) mtlr r5 mtcr r6 - ld r0, VCPU_GPR(R0)(r4) ld r1, VCPU_GPR(R1)(r4) ld r2, VCPU_GPR(R2)(r4) ld r3, VCPU_GPR(R3)(r4) @@ -581,6 +583,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r12, VCPU_GPR(R12)(r4) ld r13, VCPU_GPR(R13)(r4) +BEGIN_FTR_SECTION + mtspr SPRN_PPR, r0 +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) + ld r0, VCPU_GPR(R0)(r4) ld r4, VCPU_GPR(R4)(r4) hrfid @@ -631,6 +637,10 @@ BEGIN_FTR_SECTION ld r3, HSTATE_CFAR(r13) std r3, VCPU_CFAR(r9) END_FTR_SECTION_IFSET(CPU_FTR_CFAR) +BEGIN_FTR_SECTION + ld r4, HSTATE_PPR(r13) + std r4, VCPU_PPR(r9) +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* Restore R1/R2 so we can handle faults */ ld r1, HSTATE_HOST_R1(r13)
POWER7 and later IBM server processors have a register called the Program Priority Register (PPR), which controls the priority of each hardware CPU SMT thread, and affects how fast it runs compared to other SMT threads. This priority can be controlled by writing to the PPR or by use of a set of instructions of the form or rN,rN,rN which are otherwise no-ops but have been defined to set the priority to particular levels. This adds code to context switch the PPR when entering and exiting guests and to make the PPR value accessible through the SET/GET_ONE_REG interface. When entering the guest, we set the PPR as late as possible, because if we are setting a low thread priority it will make the code run slowly from that point on. Similarly, the first-level interrupt handlers save the PPR value in the PACA very early on, and set the thread priority to the medium level, so that the interrupt handling code runs at a reasonable speed. Signed-off-by: Paul Mackerras <paulus@samba.org> --- Documentation/virtual/kvm/api.txt | 1 + arch/powerpc/include/asm/exception-64s.h | 8 ++++++++ arch/powerpc/include/asm/kvm_book3s_asm.h | 1 + arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/include/uapi/asm/kvm.h | 1 + arch/powerpc/kernel/asm-offsets.c | 2 ++ arch/powerpc/kvm/book3s_hv.c | 6 ++++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 12 +++++++++++- 8 files changed, 31 insertions(+), 1 deletion(-)