Patch Detail

GET: Show a patch.
PATCH: Update a patch.
PUT: Update a patch.

GET /api/1.2/patches/2225350/?format=api
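For illustration, here is a minimal C client for the GET operation above, using libcurl (build with -lcurl). The URL is the endpoint documented here; the buffer helper and the bare-bones error handling are sketch assumptions, not part of Patchwork. It assumes the API serves JSON to non-browser clients, as Django REST Framework endpoints typically do; note that PATCH/PUT updates additionally require authentication. Requesting it prints JSON like the response below.

/*
 * Minimal sketch: fetch this patch's JSON from the Patchwork REST API.
 */
#include <curl/curl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	char *data;
	size_t len;
};

/* Accumulate the response body; libcurl may call this several times. */
static size_t on_data(char *ptr, size_t size, size_t nmemb, void *userdata)
{
	struct buf *b = userdata;
	size_t n = size * nmemb;
	char *p = realloc(b->data, b->len + n + 1);

	if (!p)
		return 0;	/* returning a short count aborts the transfer */
	b->data = p;
	memcpy(b->data + b->len, ptr, n);
	b->len += n;
	b->data[b->len] = '\0';
	return n;
}

int main(void)
{
	struct buf b = { NULL, 0 };
	CURL *curl;
	int ret = 1;

	curl_global_init(CURL_GLOBAL_DEFAULT);
	curl = curl_easy_init();
	if (!curl)
		goto out;

	curl_easy_setopt(curl, CURLOPT_URL,
			 "http://patchwork.ozlabs.org/api/1.2/patches/2225350/");
	curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, on_data);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, &b);

	if (curl_easy_perform(curl) == CURLE_OK) {
		printf("%s\n", b.data);	/* JSON as shown below */
		ret = 0;
	}
	curl_easy_cleanup(curl);
out:
	curl_global_cleanup();
	free(b.data);
	return ret;
}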
{ "id": 2225350, "url": "http://patchwork.ozlabs.org/api/1.2/patches/2225350/?format=api", "web_url": "http://patchwork.ozlabs.org/project/kvm-riscv/patch/20260420212004.3938325-20-seanjc@google.com/", "project": { "id": 70, "url": "http://patchwork.ozlabs.org/api/1.2/projects/70/?format=api", "name": "Linux KVM RISC-V", "link_name": "kvm-riscv", "list_id": "kvm-riscv.lists.infradead.org", "list_email": "kvm-riscv@lists.infradead.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "http://lists.infradead.org/pipermail/kvm-riscv/", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20260420212004.3938325-20-seanjc@google.com>", "list_archive_url": null, "date": "2026-04-20T21:20:04", "name": "[v3,19/19] KVM: selftests: Replace \"paddr\" with \"gpa\" throughout", "commit_ref": null, "pull_url": null, "state": "new", "archived": false, "hash": "dd0d0f6d6c98497bb404db7d9425ea0fe6762a8a", "submitter": { "id": 81022, "url": "http://patchwork.ozlabs.org/api/1.2/people/81022/?format=api", "name": "Sean Christopherson", "email": "seanjc@google.com" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/kvm-riscv/patch/20260420212004.3938325-20-seanjc@google.com/mbox/", "series": [ { "id": 500685, "url": "http://patchwork.ozlabs.org/api/1.2/series/500685/?format=api", "web_url": "http://patchwork.ozlabs.org/project/kvm-riscv/list/?series=500685", "date": "2026-04-20T21:19:45", "name": "KVM: selftests: Use kernel-style integer and g[vp]a_t types", "version": 3, "mbox": "http://patchwork.ozlabs.org/series/500685/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/2225350/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/2225350/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "\n <kvm-riscv-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org>", "X-Original-To": "incoming@patchwork.ozlabs.org", "Delivered-To": "patchwork-incoming@legolas.ozlabs.org", "Authentication-Results": [ "legolas.ozlabs.org;\n\tdkim=pass (2048-bit key;\n secure) header.d=lists.infradead.org header.i=@lists.infradead.org\n header.a=rsa-sha256 header.s=bombadil.20210309 header.b=o+J92oqo;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n secure) header.d=infradead.org header.i=@infradead.org header.a=rsa-sha256\n header.s=casper.20170209 header.b=GZvTGgxl;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n unprotected) header.d=google.com header.i=@google.com header.a=rsa-sha256\n header.s=20251104 header.b=XMOOcee0;\n\tdkim-atps=neutral", "legolas.ozlabs.org;\n spf=none (no SPF record) smtp.mailfrom=lists.infradead.org\n (client-ip=2607:7c80:54:3::133; helo=bombadil.infradead.org;\n envelope-from=kvm-riscv-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org;\n receiver=patchwork.ozlabs.org)" ], "Received": [ "from bombadil.infradead.org (bombadil.infradead.org\n [IPv6:2607:7c80:54:3::133])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature ECDSA (secp384r1) server-digest SHA384)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4fzz225KMPz1yCv\n\tfor <incoming@patchwork.ozlabs.org>; Tue, 21 Apr 2026 07:21:30 +1000 (AEST)", "from localhost ([::1] helo=bombadil.infradead.org)\n\tby bombadil.infradead.org with esmtp (Exim 4.98.2 #2 (Red Hat Linux))\n\tid 1wEw3b-00000007h3C-1ujD;\n\tMon, 20 Apr 2026 21:21:27 +0000", "from casper.infradead.org ([2001:8b0:10b:1236::1])\n\tby 
bombadil.infradead.org with esmtps (Exim 4.98.2 #2 (Red Hat Linux))\n\tid 1wEw31-00000007gBx-07sj\n\tfor kvm-riscv@bombadil.infradead.org;\n\tMon, 20 Apr 2026 21:20:51 +0000", "from mail-pg1-x54a.google.com ([2607:f8b0:4864:20::54a])\n\tby casper.infradead.org with esmtps (Exim 4.98.2 #2 (Red Hat Linux))\n\tid 1wEw2x-000000093Tc-2ixJ\n\tfor kvm-riscv@lists.infradead.org;\n\tMon, 20 Apr 2026 21:20:49 +0000", "by mail-pg1-x54a.google.com with SMTP id\n 41be03b00d2f7-b630b4d8d52so1717130a12.3\n for <kvm-riscv@lists.infradead.org>;\n Mon, 20 Apr 2026 14:20:47 -0700 (PDT)" ], "DKIM-Signature": [ "v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;\n\td=lists.infradead.org; s=bombadil.20210309; h=Sender:\n\tContent-Transfer-Encoding:Content-Type:Reply-To:List-Subscribe:List-Help:\n\tList-Post:List-Archive:List-Unsubscribe:List-Id:Cc:To:From:Subject:Message-ID\n\t:References:Mime-Version:In-Reply-To:Date:Content-ID:Content-Description:\n\tResent-Date:Resent-From:Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:\n\tList-Owner; bh=gQiLPUqJJQhGSSG8P7/KzrUrdxat3mygyTVQw6YIZBc=; b=o+J92oqoxnbBqq\n\t8uYCUsxS6r5rkmSqIy04YQu3PvpCbBzqvs1POlj8CBCHps264pfuvC9Dwcme9V5mptaUgIzZnbwCw\n\tIN59M8GJFiWNtS8TWGh+PXnGQOUDOx/pRgPBTDguZ7YKUAdoVcz41n1WJbvqqDorT9qLB4+YufEM0\n\te3ZEAE9wSy5FDT5fQL/XMHAxwhKxW4l607pC8fz9elvnRcy8PfhwDgcqUVbrSnRVTzYvS87TUIPUf\n\taV+74SE1m/0b7piOAUiVxUmc0ZcMWK/WTc/ra61jLEzYCMyos/RAFKjPxXDTQSQKx6R9YpDM4zjQF\n\t/Qq9P6W1Olb4IynnRd/A==;", "v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;\n\td=infradead.org; s=casper.20170209; h=Content-Type:Cc:To:From:Subject:\n\tMessage-ID:References:Mime-Version:In-Reply-To:Date:Reply-To:Sender:\n\tContent-Transfer-Encoding:Content-ID:Content-Description;\n\tbh=LVQ/LQLhE4omx5BYLMqaubz6HBWDiRbU4QU9/ukTiik=; b=GZvTGgxlklGuc2BDUxPa4UWDVC\n\tAyYzltEN2XFvXkHZNzFT8FB7XFNlg4Hi0ui8k5UQnHN7sq0Qt2Hv6MOPTzLgJjMd1F2RoMjHJ5xK7\n\t3oPLCcCSHjiVzDQBAFl00cGPB5hKV+wG0nPaCSJy24MQVsViMEEAdqTofHnYzgQ6WnR9xK1I/JVOO\n\tzczUfn30nrrf2FwvZvWaSgbYPNsFi7k/55KuWkZEvuMvQ5SrfpEYQagr4WajHZkSRLakmI7EQotXs\n\tu1iIU5wV5m2WRI2lZMSfYWbc8pnzh7a5reqnSBihm5Fggjsx4/5OSbDs35Sh6DH5qf90saDXd6ovZ\n\tuAfmUxOw==;", "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=google.com; s=20251104; t=1776720044; x=1777324844;\n darn=lists.infradead.org;\n h=cc:to:from:subject:message-id:references:mime-version:in-reply-to\n :date:reply-to:from:to:cc:subject:date:message-id:reply-to;\n bh=LVQ/LQLhE4omx5BYLMqaubz6HBWDiRbU4QU9/ukTiik=;\n b=XMOOcee0bBbEkL6RqwLCZdZVDE+wvwB6Y2h4OUvoTfV/ID2/PpePOGItBNXLltLFVI\n tZMhr28vzYXzlKT1HtyIPzg6yva09GWBuYY3CDUjKuiDoqerBZnf4jPmSdHYNlN5a/3+\n HINEuTrrQybYRnNm0CA79eagsr0XjOFY0AhAf4eQ8WBjm1Sh3gpnZVWK0t+e7I0TyoMc\n hNL5D+X9Fb7U1W1k+zA6QnTbVWanzt4i/ThhM6QDD2/Jf2o6Yr4kGysou9qpKWDlRw0K\n XBQpYcbLbjAdtuCP5ekA0vF2/Jbio5lmvnypVGehqxQNbmnoHFYXZWPOQydbH3W+fuYY\n S3rA==" ], "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20251104; t=1776720044; x=1777324844;\n h=cc:to:from:subject:message-id:references:mime-version:in-reply-to\n :date:reply-to:x-gm-message-state:from:to:cc:subject:date:message-id\n :reply-to;\n bh=LVQ/LQLhE4omx5BYLMqaubz6HBWDiRbU4QU9/ukTiik=;\n b=bxiZRinkCmYaeSoR3YJq+1adceQ3FJDlurHtn/F4lK48cOCDX3CdFaLOSJnOSOnElj\n OUxVae0VPz2mID5vASwHAgAyyQMcxxh8wJL9tTmMJjafHeXbwq8pI8qTsJcvVjKeiWQo\n Xdva7k6Bb+TEfZX5pJnFFle0xm5NBcHlwPP4eZx4JpmEn0EhP/hi3ruupJ72XwzK0dZ/\n zYbHT8HrJigVG4rktd1Q9E7hiBCmqGVDyVX79tvOUxMQN86Abda1jQCmKseF/nNnaLTU\n khBzTcHoVzem3kyGsDOiZJnxNmvGQpAtTFcpAkhEscR61p6eQA3TuvHn4BHwSIZcPNuD\n 8LoA==", "X-Forwarded-Encrypted": "i=1;\n 
AFNElJ9KQOpackH3ZHXx5g0/keIPYoyS9JX/siB+7wMx8q7EgLbsEHZhbSBfCoTyK6a4whxa83rYHDn6IDM=@lists.infradead.org", "X-Gm-Message-State": "AOJu0Yw+Q+XKjoWJ7l7nOM/jPIkvJ2wzm51TdTdAxfmkb/RnS83KYJrc\n\t4hE0W0823Dq8gYRF4XEavlP9iNQ5Sd7i6toOVauVdhX+mdWCnc+4ZUDr5hlewWBg25CHYLf8oVu\n\tAIMWdiA==", "X-Received": "from pgbcs14.prod.google.com\n ([2002:a05:6a02:418e:b0:c76:478c:813f])\n (user=seanjc job=prod-delivery.src-stubby-dispatcher) by\n 2002:a05:6a21:3288:b0:398:6461:688c\n with SMTP id adf61e73a8af0-3a08d687640mr17010739637.2.1776720044306; Mon, 20\n Apr 2026 14:20:44 -0700 (PDT)", "Date": "Mon, 20 Apr 2026 14:20:04 -0700", "In-Reply-To": "<20260420212004.3938325-1-seanjc@google.com>", "Mime-Version": "1.0", "References": "<20260420212004.3938325-1-seanjc@google.com>", "X-Mailer": "git-send-email 2.54.0.rc1.555.g9c883467ad-goog", "Message-ID": "<20260420212004.3938325-20-seanjc@google.com>", "Subject": "[PATCH v3 19/19] KVM: selftests: Replace \"paddr\" with \"gpa\"\n throughout", "From": "Sean Christopherson <seanjc@google.com>", "To": "Paolo Bonzini <pbonzini@redhat.com>, Marc Zyngier <maz@kernel.org>,\n\tOliver Upton <oupton@kernel.org>, Tianrui Zhao <zhaotianrui@loongson.cn>,\n\tBibo Mao <maobibo@loongson.cn>, Huacai Chen <chenhuacai@kernel.org>,\n\tAnup Patel <anup@brainfault.org>, Paul Walmsley <pjw@kernel.org>,\n\tPalmer Dabbelt <palmer@dabbelt.com>, Albert Ou <aou@eecs.berkeley.edu>,\n\tChristian Borntraeger <borntraeger@linux.ibm.com>,\n Janosch Frank <frankja@linux.ibm.com>,\n\tClaudio Imbrenda <imbrenda@linux.ibm.com>,\n Sean Christopherson <seanjc@google.com>", "Cc": "kvm@vger.kernel.org, linux-arm-kernel@lists.infradead.org,\n\tkvmarm@lists.linux.dev, loongarch@lists.linux.dev,\n\tkvm-riscv@lists.infradead.org, linux-riscv@lists.infradead.org,\n\tlinux-kernel@vger.kernel.org, David Matlack <dmatlack@google.com>", "X-CRM114-Version": "20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3 ", "X-CRM114-CacheID": "sfid-20260420_222047_706624_1312967F ", "X-CRM114-Status": "GOOD ( 15.76 )", "X-Spam-Score": "-9.5 (---------)", "X-Spam-Report": "SpamAssassin version 4.0.1 on casper.infradead.org summary:\n Content analysis details: (-9.5 points, 5.0 required)\n pts rule name description\n ---- ----------------------\n --------------------------------------------------\n -0.0 RCVD_IN_DNSWL_NONE RBL: Sender listed at https://www.dnswl.org/, no\n trust\n [2607:f8b0:4864:20:0:0:0:54a listed in]\n [list.dnswl.org]\n 0.0 SPF_HELO_NONE SPF: HELO does not publish an SPF Record\n -7.5 USER_IN_DEF_DKIM_WL From: address is in the default DKIM welcome-list\n -0.0 SPF_PASS SPF: sender matches SPF record\n -0.1 DKIM_VALID_AU Message has a valid DKIM or DK signature from\n author's\n domain\n -0.1 DKIM_VALID Message has at least one valid DKIM or DK\n signature\n 0.1 DKIM_SIGNED Message has a DKIM or DK signature,\n not necessarily valid\n -1.9 BAYES_00 BODY: Bayes spam probability is 0 to 1%\n [score: 0.0000]\n -0.0 DKIMWL_WL_MED DKIMwl.org - Medium trust sender", "X-BeenThere": "kvm-riscv@lists.infradead.org", "X-Mailman-Version": "2.1.34", "Precedence": "list", "List-Id": "<kvm-riscv.lists.infradead.org>", "List-Unsubscribe": "<http://lists.infradead.org/mailman/options/kvm-riscv>,\n <mailto:kvm-riscv-request@lists.infradead.org?subject=unsubscribe>", "List-Archive": "<http://lists.infradead.org/pipermail/kvm-riscv/>", "List-Post": "<mailto:kvm-riscv@lists.infradead.org>", "List-Help": "<mailto:kvm-riscv-request@lists.infradead.org?subject=help>", "List-Subscribe": 
"<http://lists.infradead.org/mailman/listinfo/kvm-riscv>,\n <mailto:kvm-riscv-request@lists.infradead.org?subject=subscribe>", "Reply-To": "Sean Christopherson <seanjc@google.com>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Sender": "\"kvm-riscv\" <kvm-riscv-bounces@lists.infradead.org>", "Errors-To": "kvm-riscv-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org" }, "content": "Replace all variations of \"paddr\" variables in KVM selftests with \"gpa\",\nwith the exception of the ELF structures, as those fields are not specific\nto guest virtual addresses, to complete the conversion from vm_paddr_t to\ngpa_t.\n\nNo functional change intended.\n\nSigned-off-by: Sean Christopherson <seanjc@google.com>\n---\n .../testing/selftests/kvm/arm64/sea_to_user.c | 2 +-\n .../testing/selftests/kvm/include/kvm_util.h | 23 ++++----\n .../selftests/kvm/include/x86/processor.h | 6 +--\n .../selftests/kvm/lib/arm64/processor.c | 22 ++++----\n tools/testing/selftests/kvm/lib/kvm_util.c | 53 +++++++++----------\n .../selftests/kvm/lib/loongarch/processor.c | 14 ++---\n .../selftests/kvm/lib/riscv/processor.c | 16 +++---\n .../selftests/kvm/lib/s390/processor.c | 12 ++---\n .../testing/selftests/kvm/lib/x86/processor.c | 50 ++++++++---------\n 9 files changed, 98 insertions(+), 100 deletions(-)", "diff": "diff --git a/tools/testing/selftests/kvm/arm64/sea_to_user.c b/tools/testing/selftests/kvm/arm64/sea_to_user.c\nindex e16034852470..e96d8982c28b 100644\n--- a/tools/testing/selftests/kvm/arm64/sea_to_user.c\n+++ b/tools/testing/selftests/kvm/arm64/sea_to_user.c\n@@ -275,7 +275,7 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu)\n \tvm_userspace_mem_region_add(\n \t\t/*vm=*/vm,\n \t\t/*src_type=*/src_type,\n-\t\t/*guest_paddr=*/start_gpa,\n+\t\t/*gpa=*/start_gpa,\n \t\t/*slot=*/1,\n \t\t/*npages=*/num_guest_pages,\n \t\t/*flags=*/0);\ndiff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h\nindex 0d9f11be9806..2ecaaa0e9965 100644\n--- a/tools/testing/selftests/kvm/include/kvm_util.h\n+++ b/tools/testing/selftests/kvm/include/kvm_util.h\n@@ -725,7 +725,7 @@ gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages);\n gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type);\n gva_t vm_alloc_page(struct kvm_vm *vm);\n \n-void virt_map(struct kvm_vm *vm, gva_t gva, u64 paddr,\n+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,\n \t unsigned int npages);\n void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa);\n void *addr_gva2hva(struct kvm_vm *vm, gva_t gva);\n@@ -990,21 +990,20 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);\n \n const char *exit_reason_str(unsigned int exit_reason);\n \n-gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, u32 memslot);\n-gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,\n-\t\t\t gpa_t paddr_min, u32 memslot,\n-\t\t\t bool protected);\n+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot);\n+gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, gpa_t min_gpa,\n+\t\t\t u32 memslot, bool protected);\n gpa_t vm_alloc_page_table(struct kvm_vm *vm);\n \n static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,\n-\t\t\t\t gpa_t paddr_min, u32 memslot)\n+\t\t\t\t gpa_t min_gpa, u32 memslot)\n {\n \t/*\n \t * By default, allocate memory as protected for VMs that support\n \t * protected memory, as the majority of memory for such VMs is\n \t * protected, i.e. 
using shared memory is effectively opt-in.\n \t */\n-\treturn __vm_phy_pages_alloc(vm, num, paddr_min, memslot,\n+\treturn __vm_phy_pages_alloc(vm, num, min_gpa, memslot,\n \t\t\t\t vm_arch_has_protected_memory(vm));\n }\n \n@@ -1203,13 +1202,13 @@ static inline void virt_pgd_alloc(struct kvm_vm *vm)\n \n /*\n * Within @vm, creates a virtual translation for the page starting\n- * at @gva to the page starting at @paddr.\n+ * at @gva to the page starting at @gpa.\n */\n-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr);\n+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa);\n \n-static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)\n+static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)\n {\n-\tvirt_arch_pg_map(vm, gva, paddr);\n+\tvirt_arch_pg_map(vm, gva, gpa);\n \tsparsebit_set(vm->vpages_mapped, gva >> vm->page_shift);\n }\n \n@@ -1280,7 +1279,7 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus);\n void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);\n void kvm_arch_vm_release(struct kvm_vm *vm);\n \n-bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr);\n+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa);\n \n u32 guest_get_vcpuid(void);\n \ndiff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h\nindex 97dc887658c3..77f576ee7789 100644\n--- a/tools/testing/selftests/kvm/include/x86/processor.h\n+++ b/tools/testing/selftests/kvm/include/x86/processor.h\n@@ -1508,13 +1508,13 @@ void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,\n \t\t struct pte_masks *pte_masks);\n \n void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,\n-\t\t u64 paddr, int level);\n-void virt_map_level(struct kvm_vm *vm, gva_t gva, u64 paddr,\n+\t\t gpa_t gpa, int level);\n+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,\n \t\t u64 nr_bytes, int level);\n \n void vm_enable_tdp(struct kvm_vm *vm);\n bool kvm_cpu_has_tdp(void);\n-void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, u64 paddr, u64 size);\n+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size);\n void tdp_identity_map_default_memslots(struct kvm_vm *vm);\n void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size);\n u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa);\ndiff --git a/tools/testing/selftests/kvm/lib/arm64/processor.c b/tools/testing/selftests/kvm/lib/arm64/processor.c\nindex 0f693d8891d2..01325bf4d36f 100644\n--- a/tools/testing/selftests/kvm/lib/arm64/processor.c\n+++ b/tools/testing/selftests/kvm/lib/arm64/processor.c\n@@ -121,7 +121,7 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)\n \tvm->mmu.pgd_created = true;\n }\n \n-static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr,\n+static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,\n \t\t\t u64 flags)\n {\n \tu8 attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);\n@@ -133,13 +133,13 @@ static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr,\n \t\t\" gva: 0x%lx vm->page_size: 0x%x\", gva, vm->page_size);\n \tTEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),\n \t\t \"Invalid virtual address, gva: 0x%lx\", gva);\n-\tTEST_ASSERT((paddr % vm->page_size) == 0,\n-\t\t\"Physical address not on page boundary,\\n\"\n-\t\t\" paddr: 0x%lx vm->page_size: 0x%x\", paddr, vm->page_size);\n-\tTEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,\n-\t\t\"Physical address beyond beyond maximum supported,\\n\"\n-\t\t\" paddr: 0x%lx vm->max_gfn: 0x%lx 
vm->page_size: 0x%x\",\n-\t\tpaddr, vm->max_gfn, vm->page_size);\n+\tTEST_ASSERT((gpa % vm->page_size) == 0,\n+\t\t \"Physical address not on page boundary,\\n\"\n+\t\t \" gpa: 0x%lx vm->page_size: 0x%x\", gpa, vm->page_size);\n+\tTEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,\n+\t\t \"Physical address beyond beyond maximum supported,\\n\"\n+\t\t \" gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x\",\n+\t\t gpa, vm->max_gfn, vm->page_size);\n \n \tptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8;\n \tif (!*ptep)\n@@ -170,14 +170,14 @@ static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr,\n \tif (!use_lpa2_pte_format(vm))\n \t\tpg_attr |= PTE_SHARED;\n \n-\t*ptep = addr_pte(vm, paddr, pg_attr);\n+\t*ptep = addr_pte(vm, gpa, pg_attr);\n }\n \n-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)\n+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)\n {\n \tu64 attr_idx = MT_NORMAL;\n \n-\t_virt_pg_map(vm, gva, paddr, attr_idx);\n+\t_virt_pg_map(vm, gva, gpa, attr_idx);\n }\n \n u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level)\ndiff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c\nindex 905fa214099d..2a76eca7029d 100644\n--- a/tools/testing/selftests/kvm/lib/kvm_util.c\n+++ b/tools/testing/selftests/kvm/lib/kvm_util.c\n@@ -1027,8 +1027,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,\n \n \t\tTEST_FAIL(\"A mem region with the requested slot \"\n \t\t\t\"already exists.\\n\"\n-\t\t\t\" requested slot: %u paddr: 0x%lx npages: 0x%lx\\n\"\n-\t\t\t\" existing slot: %u paddr: 0x%lx size: 0x%lx\",\n+\t\t\t\" requested slot: %u gpa: 0x%lx npages: 0x%lx\\n\"\n+\t\t\t\" existing slot: %u gpa: 0x%lx size: 0x%lx\",\n \t\t\tslot, gpa, npages, region->region.slot,\n \t\t\t(u64)region->region.guest_phys_addr,\n \t\t\t(u64)region->region.memory_size);\n@@ -1442,7 +1442,7 @@ static gva_t ____vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,\n \tu64 pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);\n \n \tvirt_pgd_alloc(vm);\n-\tgpa_t paddr = __vm_phy_pages_alloc(vm, pages,\n+\tgpa_t gpa = __vm_phy_pages_alloc(vm, pages,\n \t\t\t\t\t KVM_UTIL_MIN_PFN * vm->page_size,\n \t\t\t\t\t vm->memslots[type], protected);\n \n@@ -1454,9 +1454,9 @@ static gva_t ____vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,\n \n \t/* Map the virtual pages. 
*/\n \tfor (gva_t gva = gva_start; pages > 0;\n-\t\tpages--, gva += vm->page_size, paddr += vm->page_size) {\n+\t\tpages--, gva += vm->page_size, gpa += vm->page_size) {\n \n-\t\tvirt_pg_map(vm, gva, paddr);\n+\t\tvirt_pg_map(vm, gva, gpa);\n \t}\n \n \treturn gva_start;\n@@ -1506,22 +1506,21 @@ gva_t vm_alloc_page(struct kvm_vm *vm)\n * Map a range of VM virtual address to the VM's physical address.\n *\n * Within the VM given by @vm, creates a virtual translation for @npages\n- * starting at @gva to the page range starting at @paddr.\n+ * starting at @gva to the page range starting at @gpa.\n */\n-void virt_map(struct kvm_vm *vm, gva_t gva, u64 paddr,\n-\t unsigned int npages)\n+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages)\n {\n \tsize_t page_size = vm->page_size;\n \tsize_t size = npages * page_size;\n \n \tTEST_ASSERT(gva + size > gva, \"Vaddr overflow\");\n-\tTEST_ASSERT(paddr + size > paddr, \"Paddr overflow\");\n+\tTEST_ASSERT(gpa + size > gpa, \"Paddr overflow\");\n \n \twhile (npages--) {\n-\t\tvirt_pg_map(vm, gva, paddr);\n+\t\tvirt_pg_map(vm, gva, gpa);\n \n \t\tgva += page_size;\n-\t\tpaddr += page_size;\n+\t\tgpa += page_size;\n \t}\n }\n \n@@ -2008,7 +2007,7 @@ const char *exit_reason_str(unsigned int exit_reason)\n * Input Args:\n * vm - Virtual Machine\n * num - number of pages\n- * paddr_min - Physical address minimum\n+ * min_gpa - Physical address minimum\n * memslot - Memory region to allocate page from\n * protected - True if the pages will be used as protected/private memory\n *\n@@ -2018,12 +2017,12 @@ const char *exit_reason_str(unsigned int exit_reason)\n * Starting physical address\n *\n * Within the VM specified by vm, locates a range of available physical\n- * pages at or above paddr_min. If found, the pages are marked as in use\n+ * pages at or above min_gpa. If found, the pages are marked as in use\n * and their base address is returned. 
A TEST_ASSERT failure occurs if\n- * not enough pages are available at or above paddr_min.\n+ * not enough pages are available at or above min_gpa.\n */\n gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,\n-\t\t\t gpa_t paddr_min, u32 memslot,\n+\t\t\t gpa_t min_gpa, u32 memslot,\n \t\t\t bool protected)\n {\n \tstruct userspace_mem_region *region;\n@@ -2031,16 +2030,16 @@ gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,\n \n \tTEST_ASSERT(num > 0, \"Must allocate at least one page\");\n \n-\tTEST_ASSERT((paddr_min % vm->page_size) == 0, \"Min physical address \"\n+\tTEST_ASSERT((min_gpa % vm->page_size) == 0, \"Min physical address \"\n \t\t\"not divisible by page size.\\n\"\n-\t\t\" paddr_min: 0x%lx page_size: 0x%x\",\n-\t\tpaddr_min, vm->page_size);\n+\t\t\" min_gpa: 0x%lx page_size: 0x%x\",\n+\t\tmin_gpa, vm->page_size);\n \n \tregion = memslot2region(vm, memslot);\n \tTEST_ASSERT(!protected || region->protected_phy_pages,\n \t\t \"Region doesn't support protected memory\");\n \n-\tbase = pg = paddr_min >> vm->page_shift;\n+\tbase = pg = min_gpa >> vm->page_shift;\n \tdo {\n \t\tfor (; pg < base + num; ++pg) {\n \t\t\tif (!sparsebit_is_set(region->unused_phy_pages, pg)) {\n@@ -2052,8 +2051,8 @@ gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,\n \n \tif (pg == 0) {\n \t\tfprintf(stderr, \"No guest physical page available, \"\n-\t\t\t\"paddr_min: 0x%lx page_size: 0x%x memslot: %u\\n\",\n-\t\t\tpaddr_min, vm->page_size, memslot);\n+\t\t\t\"min_gpa: 0x%lx page_size: 0x%x memslot: %u\\n\",\n+\t\t\tmin_gpa, vm->page_size, memslot);\n \t\tfputs(\"---- vm dump ----\\n\", stderr);\n \t\tvm_dump(stderr, vm, 2);\n \t\tabort();\n@@ -2068,9 +2067,9 @@ gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,\n \treturn base * vm->page_size;\n }\n \n-gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, u32 memslot)\n+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot)\n {\n-\treturn vm_phy_pages_alloc(vm, 1, paddr_min, memslot);\n+\treturn vm_phy_pages_alloc(vm, 1, min_gpa, memslot);\n }\n \n gpa_t vm_alloc_page_table(struct kvm_vm *vm)\n@@ -2287,7 +2286,7 @@ void __attribute((constructor)) kvm_selftest_init(void)\n \tkvm_selftest_arch_init();\n }\n \n-bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr)\n+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa)\n {\n \tsparsebit_idx_t pg = 0;\n \tstruct userspace_mem_region *region;\n@@ -2295,10 +2294,10 @@ bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr)\n \tif (!vm_arch_has_protected_memory(vm))\n \t\treturn false;\n \n-\tregion = userspace_mem_region_find(vm, paddr, paddr);\n-\tTEST_ASSERT(region, \"No vm physical memory at 0x%lx\", paddr);\n+\tregion = userspace_mem_region_find(vm, gpa, gpa);\n+\tTEST_ASSERT(region, \"No vm physical memory at 0x%lx\", gpa);\n \n-\tpg = paddr >> vm->page_shift;\n+\tpg = gpa >> vm->page_shift;\n \treturn sparsebit_is_set(region->protected_phy_pages, pg);\n }\n \ndiff --git a/tools/testing/selftests/kvm/lib/loongarch/processor.c b/tools/testing/selftests/kvm/lib/loongarch/processor.c\nindex 47e782056196..64d91fb76522 100644\n--- a/tools/testing/selftests/kvm/lib/loongarch/processor.c\n+++ b/tools/testing/selftests/kvm/lib/loongarch/processor.c\n@@ -116,7 +116,7 @@ gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)\n \treturn pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));\n }\n \n-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)\n+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)\n {\n \tu32 prot_bits;\n \tu64 *ptep;\n@@ 
-126,17 +126,17 @@ void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)\n \t\t\t\"gva: 0x%lx vm->page_size: 0x%x\", gva, vm->page_size);\n \tTEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),\n \t\t\t\"Invalid virtual address, gva: 0x%lx\", gva);\n-\tTEST_ASSERT((paddr % vm->page_size) == 0,\n+\tTEST_ASSERT((gpa % vm->page_size) == 0,\n \t\t\t\"Physical address not on page boundary,\\n\"\n-\t\t\t\"paddr: 0x%lx vm->page_size: 0x%x\", paddr, vm->page_size);\n-\tTEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,\n+\t\t\t\"gpa: 0x%lx vm->page_size: 0x%x\", gpa, vm->page_size);\n+\tTEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,\n \t\t\t\"Physical address beyond maximum supported,\\n\"\n-\t\t\t\"paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x\",\n-\t\t\tpaddr, vm->max_gfn, vm->page_size);\n+\t\t\t\"gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x\",\n+\t\t\tgpa, vm->max_gfn, vm->page_size);\n \n \tptep = virt_populate_pte(vm, gva, 1);\n \tprot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;\n-\tWRITE_ONCE(*ptep, paddr | prot_bits);\n+\tWRITE_ONCE(*ptep, gpa | prot_bits);\n }\n \n static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level)\ndiff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c\nindex 108144fb858b..ded5429f3448 100644\n--- a/tools/testing/selftests/kvm/lib/riscv/processor.c\n+++ b/tools/testing/selftests/kvm/lib/riscv/processor.c\n@@ -75,7 +75,7 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)\n \tvm->mmu.pgd_created = true;\n }\n \n-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)\n+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)\n {\n \tu64 *ptep, next_ppn;\n \tint level = vm->mmu.pgtable_levels - 1;\n@@ -85,13 +85,13 @@ void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)\n \t\t\" gva: 0x%lx vm->page_size: 0x%x\", gva, vm->page_size);\n \tTEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),\n \t\t \"Invalid virtual address, gva: 0x%lx\", gva);\n-\tTEST_ASSERT((paddr % vm->page_size) == 0,\n+\tTEST_ASSERT((gpa % vm->page_size) == 0,\n \t\t\"Physical address not on page boundary,\\n\"\n-\t\t\" paddr: 0x%lx vm->page_size: 0x%x\", paddr, vm->page_size);\n-\tTEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,\n+\t\t\" gpa: 0x%lx vm->page_size: 0x%x\", gpa, vm->page_size);\n+\tTEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,\n \t\t\"Physical address beyond maximum supported,\\n\"\n-\t\t\" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x\",\n-\t\tpaddr, vm->max_gfn, vm->page_size);\n+\t\t\" gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x\",\n+\t\tgpa, vm->max_gfn, vm->page_size);\n \n \tptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8;\n \tif (!*ptep) {\n@@ -113,8 +113,8 @@ void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)\n \t\tlevel--;\n \t}\n \n-\tpaddr = paddr >> PGTBL_PAGE_SIZE_SHIFT;\n-\t*ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) |\n+\tgpa = gpa >> PGTBL_PAGE_SIZE_SHIFT;\n+\t*ptep = (gpa << PGTBL_PTE_ADDR_SHIFT) |\n \t\tPGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;\n }\n \ndiff --git a/tools/testing/selftests/kvm/lib/s390/processor.c b/tools/testing/selftests/kvm/lib/s390/processor.c\nindex 77a7b6965812..a9adb3782b35 100644\n--- a/tools/testing/selftests/kvm/lib/s390/processor.c\n+++ b/tools/testing/selftests/kvm/lib/s390/processor.c\n@@ -12,7 +12,7 @@\n \n void virt_arch_pgd_alloc(struct kvm_vm *vm)\n {\n-\tgpa_t paddr;\n+\tgpa_t 
gpa;\n \n \tTEST_ASSERT(vm->page_size == PAGE_SIZE, \"Unsupported page size: 0x%x\",\n \t\t vm->page_size);\n@@ -20,12 +20,12 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)\n \tif (vm->mmu.pgd_created)\n \t\treturn;\n \n-\tpaddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,\n+\tgpa = vm_phy_pages_alloc(vm, PAGES_PER_REGION,\n \t\t\t\t KVM_GUEST_PAGE_TABLE_MIN_PADDR,\n \t\t\t\t vm->memslots[MEM_REGION_PT]);\n-\tmemset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);\n+\tmemset(addr_gpa2hva(vm, gpa), 0xff, PAGES_PER_REGION * vm->page_size);\n \n-\tvm->mmu.pgd = paddr;\n+\tvm->mmu.pgd = gpa;\n \tvm->mmu.pgd_created = true;\n }\n \n@@ -60,11 +60,11 @@ void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)\n \t\t \"Invalid virtual address, gva: 0x%lx\", gva);\n \tTEST_ASSERT((gpa % vm->page_size) == 0,\n \t\t\"Physical address not on page boundary,\\n\"\n-\t\t\" paddr: 0x%lx vm->page_size: 0x%x\",\n+\t\t\" gpa: 0x%lx vm->page_size: 0x%x\",\n \t\tgva, vm->page_size);\n \tTEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,\n \t\t\"Physical address beyond beyond maximum supported,\\n\"\n-\t\t\" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x\",\n+\t\t\" gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x\",\n \t\tgva, vm->max_gfn, vm->page_size);\n \n \t/* Walk through region and segment tables */\ndiff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c\nindex 892cc517d9f1..b51467d70f6e 100644\n--- a/tools/testing/selftests/kvm/lib/x86/processor.c\n+++ b/tools/testing/selftests/kvm/lib/x86/processor.c\n@@ -224,20 +224,20 @@ static u64 *virt_create_upper_pte(struct kvm_vm *vm,\n \t\t\t\t struct kvm_mmu *mmu,\n \t\t\t\t u64 *parent_pte,\n \t\t\t\t gva_t gva,\n-\t\t\t\t u64 paddr,\n+\t\t\t\t gpa_t gpa,\n \t\t\t\t int current_level,\n \t\t\t\t int target_level)\n {\n \tu64 *pte = virt_get_pte(vm, mmu, parent_pte, gva, current_level);\n \n-\tpaddr = vm_untag_gpa(vm, paddr);\n+\tgpa = vm_untag_gpa(vm, gpa);\n \n \tif (!is_present_pte(mmu, pte)) {\n \t\t*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |\n \t\t PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |\n \t\t PTE_ALWAYS_SET_MASK(mmu);\n \t\tif (current_level == target_level)\n-\t\t\t*pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);\n+\t\t\t*pte |= PTE_HUGE_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);\n \t\telse\n \t\t\t*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;\n \t} else {\n@@ -257,7 +257,7 @@ static u64 *virt_create_upper_pte(struct kvm_vm *vm,\n }\n \n void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,\n-\t\t u64 paddr, int level)\n+\t\t gpa_t gpa, int level)\n {\n \tconst u64 pg_size = PG_LEVEL_SIZE(level);\n \tu64 *pte = &mmu->pgd;\n@@ -271,15 +271,15 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,\n \t\t \"gva: 0x%lx page size: 0x%lx\", gva, pg_size);\n \tTEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),\n \t\t \"Invalid virtual address, gva: 0x%lx\", gva);\n-\tTEST_ASSERT((paddr % pg_size) == 0,\n+\tTEST_ASSERT((gpa % pg_size) == 0,\n \t\t \"Physical address not aligned,\\n\"\n-\t\t \" paddr: 0x%lx page size: 0x%lx\", paddr, pg_size);\n-\tTEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,\n+\t\t \" gpa: 0x%lx page size: 0x%lx\", gpa, pg_size);\n+\tTEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,\n \t\t \"Physical address beyond maximum supported,\\n\"\n-\t\t \" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x\",\n-\t\t paddr, vm->max_gfn, 
vm->page_size);\n-\tTEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,\n-\t\t \"Unexpected bits in paddr: %lx\", paddr);\n+\t\t \" gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x\",\n+\t\t gpa, vm->max_gfn, vm->page_size);\n+\tTEST_ASSERT(vm_untag_gpa(vm, gpa) == gpa,\n+\t\t \"Unexpected bits in gpa: %lx\", gpa);\n \n \tTEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu),\n \t\t \"X and NX bit masks cannot be used simultaneously\");\n@@ -291,7 +291,7 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,\n \tfor (current_level = mmu->pgtable_levels;\n \t current_level > PG_LEVEL_4K;\n \t current_level--) {\n-\t\tpte = virt_create_upper_pte(vm, mmu, pte, gva, paddr,\n+\t\tpte = virt_create_upper_pte(vm, mmu, pte, gva, gpa,\n \t\t\t\t\t current_level, level);\n \t\tif (is_huge_pte(mmu, pte))\n \t\t\treturn;\n@@ -303,24 +303,24 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,\n \t\t \"PTE already present for 4k page at gva: 0x%lx\", gva);\n \t*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |\n \t PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |\n-\t PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);\n+\t PTE_ALWAYS_SET_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);\n \n \t/*\n \t * Neither SEV nor TDX supports shared page tables, so only the final\n \t * leaf PTE needs manually set the C/S-bit.\n \t */\n-\tif (vm_is_gpa_protected(vm, paddr))\n+\tif (vm_is_gpa_protected(vm, gpa))\n \t\t*pte |= PTE_C_BIT_MASK(mmu);\n \telse\n \t\t*pte |= PTE_S_BIT_MASK(mmu);\n }\n \n-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)\n+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)\n {\n-\t__virt_pg_map(vm, &vm->mmu, gva, paddr, PG_LEVEL_4K);\n+\t__virt_pg_map(vm, &vm->mmu, gva, gpa, PG_LEVEL_4K);\n }\n \n-void virt_map_level(struct kvm_vm *vm, gva_t gva, u64 paddr,\n+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,\n \t\t u64 nr_bytes, int level)\n {\n \tu64 pg_size = PG_LEVEL_SIZE(level);\n@@ -332,12 +332,12 @@ void virt_map_level(struct kvm_vm *vm, gva_t gva, u64 paddr,\n \t\t nr_bytes, pg_size);\n \n \tfor (i = 0; i < nr_pages; i++) {\n-\t\t__virt_pg_map(vm, &vm->mmu, gva, paddr, level);\n+\t\t__virt_pg_map(vm, &vm->mmu, gva, gpa, level);\n \t\tsparsebit_set_num(vm->vpages_mapped, gva >> vm->page_shift,\n \t\t\t\t nr_bytes / PAGE_SIZE);\n \n \t\tgva += pg_size;\n-\t\tpaddr += pg_size;\n+\t\tgpa += pg_size;\n \t}\n }\n \n@@ -495,24 +495,24 @@ bool kvm_cpu_has_tdp(void)\n \treturn kvm_cpu_has_ept() || kvm_cpu_has_npt();\n }\n \n-void __tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, u64 paddr, u64 size, int level)\n+void __tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size, int level)\n {\n \tsize_t page_size = PG_LEVEL_SIZE(level);\n \tsize_t npages = size / page_size;\n \n \tTEST_ASSERT(l2_gpa + size > l2_gpa, \"L2 GPA overflow\");\n-\tTEST_ASSERT(paddr + size > paddr, \"Paddr overflow\");\n+\tTEST_ASSERT(gpa + size > gpa, \"GPA overflow\");\n \n \twhile (npages--) {\n-\t\t__virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, paddr, level);\n+\t\t__virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, gpa, level);\n \t\tl2_gpa += page_size;\n-\t\tpaddr += page_size;\n+\t\tgpa += page_size;\n \t}\n }\n \n-void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, u64 paddr, u64 size)\n+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size)\n {\n-\t__tdp_map(vm, l2_gpa, paddr, size, PG_LEVEL_4K);\n+\t__tdp_map(vm, l2_gpa, gpa, size, PG_LEVEL_4K);\n }\n \n /* Prepare an identity extended page table that maps all the\n", "prefixes": [ "v3", 
"19/19" ] }