From patchwork Fri Mar 8 18:20:52 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
X-Patchwork-Submitter: Eric Auger <eric.auger@redhat.com>
X-Patchwork-Id: 1053645
From: Eric Auger <eric.auger@redhat.com>
To: eric.auger.pro@gmail.com, eric.auger@redhat.com, qemu-devel@nongnu.org,
    qemu-arm@nongnu.org, peter.maydell@linaro.org,
    shameerali.kolothum.thodi@huawei.com, imammedo@redhat.com,
    david@redhat.com, pbonzini@redhat.com, ehabkost@redhat.com,
    richard.henderson@linaro.org, sbhat@linux.ibm.com, philmd@redhat.com
Date: Fri, 8 Mar 2019 19:20:52 +0100
Message-Id: <20190308182053.5487-2-eric.auger@redhat.com>
In-Reply-To: <20190308182053.5487-1-eric.auger@redhat.com>
References: <20190308182053.5487-1-eric.auger@redhat.com>
Subject: [Qemu-devel] [PATCH v4 1/2] nvdimm: Rename AcpiNVDIMMState into NVDIMMState

As we intend to migrate acpi_nvdimm_state into the base machine, under the new name dimms_state, let's also rename its datatype.
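
For illustration only (not part of this patch): a minimal sketch of what a machine embedding the renamed type could look like after the planned migration. "MyMachineState", "nvdimms_state" and "my_machine_acpi_init" are hypothetical placeholders; NVDIMMState, its is_enabled field and nvdimm_init_acpi_state() are the identifiers touched below.

    #include "hw/boards.h"        /* MachineState */
    #include "hw/mem/nvdimm.h"    /* NVDIMMState, nvdimm_init_acpi_state() */

    /* Hypothetical machine state embedding the renamed type. */
    typedef struct MyMachineState {
        MachineState parent_obj;
        NVDIMMState nvdimms_state;   /* formerly AcpiNVDIMMState */
    } MyMachineState;

    static void my_machine_acpi_init(MyMachineState *ms, MemoryRegion *io,
                                     FWCfgState *fw_cfg, Object *owner)
    {
        /* Only set up the NVDIMM ACPI machinery when NVDIMM is enabled. */
        if (ms->nvdimms_state.is_enabled) {
            nvdimm_init_acpi_state(&ms->nvdimms_state, io, fw_cfg, owner);
        }
    }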
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Suggested-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
---
 hw/acpi/nvdimm.c        | 18 +++++++++---------
 hw/i386/pc.c            |  2 +-
 include/hw/i386/pc.h    |  2 +-
 include/hw/mem/nvdimm.h | 10 +++++-----
 4 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
index e53b2cb681..f73cfb9d90 100644
--- a/hw/acpi/nvdimm.c
+++ b/hw/acpi/nvdimm.c
@@ -382,7 +382,7 @@ nvdimm_build_structure_caps(GArray *structures, uint32_t capabilities)
     nfit_caps->capabilities = cpu_to_le32(capabilities);
 }
 
-static GArray *nvdimm_build_device_structure(AcpiNVDIMMState *state)
+static GArray *nvdimm_build_device_structure(NVDIMMState *state)
 {
     GSList *device_list = nvdimm_get_device_list();
     GArray *structures = g_array_new(false, true /* clear */, 1);
@@ -416,7 +416,7 @@ static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf)
     fit_buf->fit = g_array_new(false, true /* clear */, 1);
 }
 
-static void nvdimm_build_fit_buffer(AcpiNVDIMMState *state)
+static void nvdimm_build_fit_buffer(NVDIMMState *state)
 {
     NvdimmFitBuffer *fit_buf = &state->fit_buf;
 
@@ -425,12 +425,12 @@ static void nvdimm_build_fit_buffer(AcpiNVDIMMState *state)
     fit_buf->dirty = true;
 }
 
-void nvdimm_plug(AcpiNVDIMMState *state)
+void nvdimm_plug(NVDIMMState *state)
 {
     nvdimm_build_fit_buffer(state);
 }
 
-static void nvdimm_build_nfit(AcpiNVDIMMState *state, GArray *table_offsets,
+static void nvdimm_build_nfit(NVDIMMState *state, GArray *table_offsets,
                               GArray *table_data, BIOSLinker *linker)
 {
     NvdimmFitBuffer *fit_buf = &state->fit_buf;
@@ -570,7 +570,7 @@ nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
 #define NVDIMM_QEMU_RSVD_HANDLE_ROOT 0x10000
 
 /* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */
-static void nvdimm_dsm_func_read_fit(AcpiNVDIMMState *state, NvdimmDsmIn *in,
+static void nvdimm_dsm_func_read_fit(NVDIMMState *state, NvdimmDsmIn *in,
                                      hwaddr dsm_mem_addr)
 {
     NvdimmFitBuffer *fit_buf = &state->fit_buf;
@@ -619,7 +619,7 @@ exit:
 }
 
 static void
-nvdimm_dsm_handle_reserved_root_method(AcpiNVDIMMState *state,
+nvdimm_dsm_handle_reserved_root_method(NVDIMMState *state,
                                        NvdimmDsmIn *in, hwaddr dsm_mem_addr)
 {
     switch (in->function) {
@@ -863,7 +863,7 @@ nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
 static void
 nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
 {
-    AcpiNVDIMMState *state = opaque;
+    NVDIMMState *state = opaque;
     NvdimmDsmIn *in;
     hwaddr dsm_mem_addr = val;
 
@@ -925,7 +925,7 @@ void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev)
     }
 }
 
-void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
+void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io,
                             FWCfgState *fw_cfg, Object *owner)
 {
     memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state,
@@ -1319,7 +1319,7 @@ static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
 }
 
 void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
-                       BIOSLinker *linker, AcpiNVDIMMState *state,
+                       BIOSLinker *linker, NVDIMMState *state,
                        uint32_t ram_slots)
 {
     GSList *device_list;
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 42128183e9..0338dbe9da 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -2571,7 +2571,7 @@ static void pc_machine_set_nvdimm_persistence(Object *obj, const char *value,
                                               Error **errp)
 {
     PCMachineState *pcms = PC_MACHINE(obj);
-    AcpiNVDIMMState *nvdimm_state = &pcms->acpi_nvdimm_state;
+    NVDIMMState *nvdimm_state = &pcms->acpi_nvdimm_state;
 
     if (strcmp(value, "cpu") == 0)
         nvdimm_state->persistence = 3;
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index 54222a202d..94fb620d65 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -45,7 +45,7 @@ struct PCMachineState {
     OnOffAuto vmport;
     OnOffAuto smm;
 
-    AcpiNVDIMMState acpi_nvdimm_state;
+    NVDIMMState acpi_nvdimm_state;
 
     bool acpi_build_enabled;
     bool smbus_enabled;
diff --git a/include/hw/mem/nvdimm.h b/include/hw/mem/nvdimm.h
index c5c9b3c7f8..523a9b3d4a 100644
--- a/include/hw/mem/nvdimm.h
+++ b/include/hw/mem/nvdimm.h
@@ -123,7 +123,7 @@ struct NvdimmFitBuffer {
 };
 typedef struct NvdimmFitBuffer NvdimmFitBuffer;
 
-struct AcpiNVDIMMState {
+struct NVDIMMState {
     /* detect if NVDIMM support is enabled. */
     bool is_enabled;
 
@@ -141,13 +141,13 @@ struct AcpiNVDIMMState {
     int32_t persistence;
     char *persistence_string;
 };
-typedef struct AcpiNVDIMMState AcpiNVDIMMState;
+typedef struct NVDIMMState NVDIMMState;
 
-void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
+void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io,
                             FWCfgState *fw_cfg, Object *owner);
 void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
-                       BIOSLinker *linker, AcpiNVDIMMState *state,
+                       BIOSLinker *linker, NVDIMMState *state,
                        uint32_t ram_slots);
-void nvdimm_plug(AcpiNVDIMMState *state);
+void nvdimm_plug(NVDIMMState *state);
 void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev);
 #endif