From patchwork Fri Jun 12 22:45:51 2015
X-Patchwork-Submitter: Aurelien Jarno
X-Patchwork-Id: 483782
From: Aurelien Jarno
To: qemu-devel@nongnu.org
Cc: Alexander Graf, Aurelien Jarno, Richard Henderson
Date: Sat, 13 Jun 2015 00:45:51 +0200
Message-Id: <1434149163-16639-4-git-send-email-aurelien@aurel32.net>
X-Mailer: git-send-email 2.1.4
In-Reply-To: <1434149163-16639-1-git-send-email-aurelien@aurel32.net>
References: <1434149163-16639-1-git-send-email-aurelien@aurel32.net>
Subject: [Qemu-devel] [PATCH 03/15] target-s390x: mvc_fast_memset: access memory through softmmu

mvc_fast_memset bypasses the softmmu functions, getting the physical
address with the mmu_translate function and accessing the corresponding
physical memory directly. This prevents watchpoints from working
correctly.

Instead, use the tlb_vaddr_to_host function to get the host address
corresponding to the guest address through the softmmu code, and fall
back to the byte-level code when the corresponding address is not in
the QEMU TLB or is being examined through a watchpoint. As a bonus it
also works for areas crossing page boundaries, by splitting the area
into chunks each contained in a single page, which brings some
performance improvement.

At the same time, rename the function to fast_memset, as it is not
specific to mvc, and use the same argument order as the C memset
function.
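The page-splitting mentioned above relies on the existing adj_len_to_page()
helper (only its closing lines appear as diff context below). As a rough
standalone sketch of the idea, assuming a fixed 4 KiB page size in place of
QEMU's TARGET_PAGE_SIZE/TARGET_PAGE_MASK, the loop below splits a guest range
into chunks that never cross a page boundary, mirroring the structure of the
new fast_memset loop:

/* Simplified stand-in for target-s390x's adj_len_to_page(); not the real
   helper.  Assumes a 4 KiB page size for illustration. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
{
    /* Bytes left until the end of the page containing addr. */
    uint64_t left_in_page = PAGE_SIZE - (addr & ~PAGE_MASK);
    return len < left_in_page ? len : left_in_page;
}

int main(void)
{
    /* A 0x30-byte store starting 0x10 bytes before a page boundary is
       split into a 0x10-byte chunk and a 0x20-byte chunk. */
    uint64_t dest = 0x1ff0, l = 0x30;

    while (l > 0) {
        uint64_t chunk = adj_len_to_page(l, dest);
        printf("chunk: addr=0x%" PRIx64 " len=0x%" PRIx64 "\n", dest, chunk);
        dest += chunk;
        l -= chunk;
    }
    return 0;
}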
Cc: Alexander Graf
Cc: Richard Henderson
Signed-off-by: Aurelien Jarno
---
 target-s390x/mem_helper.c | 73 +++++++++++++++++++++--------------------------
 1 file changed, 32 insertions(+), 41 deletions(-)

diff --git a/target-s390x/mem_helper.c b/target-s390x/mem_helper.c
index b8d3a5f..947359b 100644
--- a/target-s390x/mem_helper.c
+++ b/target-s390x/mem_helper.c
@@ -65,30 +65,30 @@ static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
     return len;
 }
 
-#ifndef CONFIG_USER_ONLY
-static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
-                            uint8_t byte)
+static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
+                        uint32_t l)
 {
-    S390CPU *cpu = s390_env_get_cpu(env);
-    hwaddr dest_phys;
-    hwaddr len = l;
-    void *dest_p;
-    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
-    int flags;
-
-    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags, true)) {
-        cpu_stb_data(env, dest, byte);
-        cpu_abort(CPU(cpu), "should never reach here");
+    int mmu_idx = cpu_mmu_index(env);
+
+    while (l > 0) {
+        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
+        if (p) {
+            /* Access to the whole page in write mode granted. */
+            int l_adj = adj_len_to_page(l, dest);
+            memset(p, byte, l_adj);
+            dest += l_adj;
+            l -= l_adj;
+        } else {
+            /* We failed to get access to the whole page. The next write
+               access will likely fill the QEMU TLB for the next iteration. */
+            cpu_stb_data(env, dest, byte);
+            dest++;
+            l--;
+        }
     }
-    dest_phys |= dest & ~TARGET_PAGE_MASK;
-
-    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
-
-    memset(dest_p, byte, len);
-
-    cpu_physical_memory_unmap(dest_p, 1, len, len);
 }
 
+#ifndef CONFIG_USER_ONLY
 static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
                              uint64_t src)
 {
@@ -154,19 +154,11 @@ uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
     HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
                __func__, l, dest, src);
 
-#ifndef CONFIG_USER_ONLY
     /* xor with itself is the same as memset(0) */
-    if ((l > 32) && (src == dest) &&
-        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
-        mvc_fast_memset(env, l + 1, dest, 0);
-        return 0;
-    }
-#else
     if (src == dest) {
-        memset(g2h(dest), 0, l + 1);
+        fast_memset(env, dest, 0, l + 1);
         return 0;
     }
-#endif
 
     for (i = 0; i <= l; i++) {
         x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
@@ -208,24 +200,23 @@ void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
     HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
                __func__, l, dest, src);
 
+    /* mvc with source pointing to the byte after the destination is the
+       same as memset with the first source byte */
+    if (dest == (src + 1)) {
+        fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
+        return;
+    }
 #ifndef CONFIG_USER_ONLY
     if ((l > 32) &&
         (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
-        (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
-        if (dest == (src + 1)) {
-            mvc_fast_memset(env, l + 1, dest, cpu_ldub_data(env, src));
-            return;
-        } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
-            mvc_fast_memmove(env, l + 1, dest, src);
-            return;
-        }
+        (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK) &&
+        (src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
+        mvc_fast_memmove(env, l + 1, dest, src);
+        return;
     }
 #else
-    if (dest == (src + 1)) {
-        memset(g2h(dest), cpu_ldub_data(env, src), l + 1);
-        return;
     /* mvc and memmove do not behave the same when areas overlap! */
-    } else if ((dest < src) || (src + l < dest)) {
+    if ((dest < src) || (src + l < dest)) {
         memmove(g2h(dest), g2h(src), l + 1);
         return;
     }