From patchwork Wed Aug 22 12:31:56 2012
X-Patchwork-Submitter: Julien Grall
X-Patchwork-Id: 179370
From: Julien Grall
To: qemu-devel@nongnu.org
Date: Wed, 22 Aug 2012 13:31:56 +0100
Message-ID: <05f8b1f47b42aaf2430490453a2b783eabbe9bfb.1345552068.git.julien.grall@citrix.com>
Cc: Julien Grall, christian.limpach@gmail.com, Stefano.Stabellini@eu.citrix.com, xen-devel@lists.xen.org
Subject: [Qemu-devel] [XEN][RFC PATCH V2 10/17] xc: Add argument to allocate more special pages

This patch makes it possible to allocate more special pages: with multiple
ioreq servers, two shared pages are needed per server (one for synchronous
ioreqs and one for buffered ioreqs).
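
To illustrate the new layout (the figure of 4 extra pages below is only an
example, e.g. two ioreq servers needing two pages each; it is not part of the
patch): with NR_SPECIAL_PAGES now 6 and
special_pfn(x, add) = 0xff000 - (NR_SPECIAL_PAGES + (add)) + (x),

    special_pfn(0, 4) .. special_pfn(5, 4)  -> 0xfeff6 .. 0xfeffb  (fixed special pages)
    special_pfn(6, 4) .. special_pfn(9, 4)  -> 0xfeffc .. 0xfefff  (per-server io pages)
    HVM_PARAM_IO_PFN_FIRST = 0xfeffc, HVM_PARAM_IO_PFN_LAST = 0xfefff
    hvm_info->reserved_mem_pgstart = special_pfn(0, 4) = 0xfeff6

so the special region still ends just below 0xff000 and simply grows
downwards as more pages are requested.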
Signed-off-by: Julien Grall
---
 tools/libxc/xc_hvm_build_x86.c    | 59 +++++++++++++++++++-----------------
 tools/libxc/xenguest.h            |  4 ++-
 tools/python/xen/lowlevel/xc/xc.c |  3 +-
 3 files changed, 36 insertions(+), 30 deletions(-)

diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
index cf5d7fb..b98536b 100644
--- a/tools/libxc/xc_hvm_build_x86.c
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -41,16 +41,15 @@
 #define SPECIALPAGE_PAGING   0
 #define SPECIALPAGE_ACCESS   1
 #define SPECIALPAGE_SHARING  2
-#define SPECIALPAGE_BUFIOREQ 3
-#define SPECIALPAGE_XENSTORE 4
-#define SPECIALPAGE_IOREQ    5
-#define SPECIALPAGE_IDENT_PT 6
-#define SPECIALPAGE_CONSOLE  7
-#define NR_SPECIAL_PAGES     8
-#define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
+#define SPECIALPAGE_XENSTORE 3
+#define SPECIALPAGE_IDENT_PT 4
+#define SPECIALPAGE_CONSOLE  5
+#define NR_SPECIAL_PAGES     6
+#define special_pfn(x, add) (0xff000u - (NR_SPECIAL_PAGES + (add)) + (x))
 
 static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
-                           uint64_t mmio_start, uint64_t mmio_size)
+                           uint64_t mmio_start, uint64_t mmio_size,
+                           uint32_t nr_special_pages)
 {
     struct hvm_info_table *hvm_info = (struct hvm_info_table *)
         (((unsigned char *)hvm_info_page) + HVM_INFO_OFFSET);
@@ -78,7 +77,7 @@ static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
     /* Memory parameters. */
     hvm_info->low_mem_pgend = lowmem_end >> PAGE_SHIFT;
     hvm_info->high_mem_pgend = highmem_end >> PAGE_SHIFT;
-    hvm_info->reserved_mem_pgstart = special_pfn(0);
+    hvm_info->reserved_mem_pgstart = special_pfn(0, nr_special_pages);
 
     /* Finish with the checksum. */
     for ( i = 0, sum = 0; i < hvm_info->length; i++ )
@@ -148,6 +147,7 @@ static int setup_guest(xc_interface *xch,
     unsigned long target_pages = args->mem_target >> PAGE_SHIFT;
     uint64_t mmio_start = (1ull << 32) - args->mmio_size;
     uint64_t mmio_size = args->mmio_size;
+    uint32_t nr_special_pages = args->nr_special_pages;
     unsigned long entry_eip, cur_pages, cur_pfn;
     void *hvm_info_page;
     uint32_t *ident_pt;
@@ -341,37 +341,38 @@ static int setup_guest(xc_interface *xch,
                   xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
                   HVM_INFO_PFN)) == NULL )
         goto error_out;
-    build_hvm_info(hvm_info_page, v_end, mmio_start, mmio_size);
+    build_hvm_info(hvm_info_page, v_end, mmio_start, mmio_size, nr_special_pages);
     munmap(hvm_info_page, PAGE_SIZE);
 
     /* Allocate and clear special pages. */
-    for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
+    for ( i = 0; i < (NR_SPECIAL_PAGES + nr_special_pages); i++ )
     {
-        xen_pfn_t pfn = special_pfn(i);
+        xen_pfn_t pfn = special_pfn(i, nr_special_pages);
         rc = xc_domain_populate_physmap_exact(xch, dom, 1, 0, 0, &pfn);
         if ( rc != 0 )
         {
             PERROR("Could not allocate %d'th special page.", i);
             goto error_out;
         }
-        if ( xc_clear_domain_page(xch, dom, special_pfn(i)) )
+        if ( xc_clear_domain_page(xch, dom, special_pfn(i, nr_special_pages)) )
             goto error_out;
     }
 
     xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_PFN,
-                     special_pfn(SPECIALPAGE_XENSTORE));
-    xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
-                     special_pfn(SPECIALPAGE_BUFIOREQ));
-    xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
-                     special_pfn(SPECIALPAGE_IOREQ));
+                     special_pfn(SPECIALPAGE_XENSTORE, nr_special_pages));
     xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
-                     special_pfn(SPECIALPAGE_CONSOLE));
+                     special_pfn(SPECIALPAGE_CONSOLE, nr_special_pages));
     xc_set_hvm_param(xch, dom, HVM_PARAM_PAGING_RING_PFN,
-                     special_pfn(SPECIALPAGE_PAGING));
+                     special_pfn(SPECIALPAGE_PAGING, nr_special_pages));
     xc_set_hvm_param(xch, dom, HVM_PARAM_ACCESS_RING_PFN,
-                     special_pfn(SPECIALPAGE_ACCESS));
+                     special_pfn(SPECIALPAGE_ACCESS, nr_special_pages));
     xc_set_hvm_param(xch, dom, HVM_PARAM_SHARING_RING_PFN,
-                     special_pfn(SPECIALPAGE_SHARING));
+                     special_pfn(SPECIALPAGE_SHARING, nr_special_pages));
+    xc_set_hvm_param(xch, dom, HVM_PARAM_IO_PFN_FIRST,
+                     special_pfn(NR_SPECIAL_PAGES, nr_special_pages));
+    xc_set_hvm_param(xch, dom, HVM_PARAM_IO_PFN_LAST,
+                     special_pfn(NR_SPECIAL_PAGES + nr_special_pages - 1,
+                                 nr_special_pages));
 
     /*
      * Identity-map page table is required for running with CR0.PG=0 when
@@ -379,14 +380,14 @@ static int setup_guest(xc_interface *xch,
      */
     if ( (ident_pt = xc_map_foreign_range(
               xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
-              special_pfn(SPECIALPAGE_IDENT_PT))) == NULL )
+              special_pfn(SPECIALPAGE_IDENT_PT, nr_special_pages))) == NULL )
         goto error_out;
     for ( i = 0; i < PAGE_SIZE / sizeof(*ident_pt); i++ )
         ident_pt[i] = ((i << 22) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
                        _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
     munmap(ident_pt, PAGE_SIZE);
     xc_set_hvm_param(xch, dom, HVM_PARAM_IDENT_PT,
-                     special_pfn(SPECIALPAGE_IDENT_PT) << PAGE_SHIFT);
+                     special_pfn(SPECIALPAGE_IDENT_PT, nr_special_pages) << PAGE_SHIFT);
 
     /* Insert JMP instruction at address 0x0 to reach entry point. */
     entry_eip = elf_uval(&elf, elf.ehdr, e_entry);
@@ -454,16 +455,18 @@ int xc_hvm_build(xc_interface *xch, uint32_t domid,
  * If target == memsize, pages are populated normally.
  */
 int xc_hvm_build_target_mem(xc_interface *xch,
-                           uint32_t domid,
-                           int memsize,
-                           int target,
-                           const char *image_name)
+                            uint32_t domid,
+                            int memsize,
+                            int target,
+                            const char *image_name,
+                            uint32_t nr_special_pages)
 {
     struct xc_hvm_build_args args = {};
 
     args.mem_size = (uint64_t)memsize << 20;
     args.mem_target = (uint64_t)target << 20;
     args.image_file_name = image_name;
+    args.nr_special_pages = nr_special_pages;
 
     return xc_hvm_build(xch, domid, &args);
 }
diff --git a/tools/libxc/xenguest.h b/tools/libxc/xenguest.h
index 707e31c..9a0d38f 100644
--- a/tools/libxc/xenguest.h
+++ b/tools/libxc/xenguest.h
@@ -216,6 +216,7 @@ struct xc_hvm_build_args {
     uint64_t mem_target;         /* Memory target in bytes. */
     uint64_t mmio_size;          /* Size of the MMIO hole in bytes. */
     const char *image_file_name; /* File name of the image to load. */
+    uint32_t nr_special_pages;   /* Additional special pages for io daemon */
 };
 
 /**
@@ -234,7 +235,8 @@ int xc_hvm_build_target_mem(xc_interface *xch,
                             uint32_t domid,
                             int memsize,
                             int target,
-                            const char *image_name);
+                            const char *image_name,
+                            uint32_t nr_special_pages);
 
 int xc_suspend_evtchn_release(xc_interface *xch, xc_evtchn *xce,
                               int domid, int suspend_evtchn);
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 7c89756..eb004b6 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -984,8 +984,9 @@ static PyObject *pyxc_hvm_build(XcObject *self,
     if ( target == -1 )
         target = memsize;
 
+    // Ugly fix : we must retrieve the number of servers
     if ( xc_hvm_build_target_mem(self->xc_handle, dom, memsize,
-                                 target, image) != 0 )
+                                 target, image, 0) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
 #if !defined(__ia64__)
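
For reference, once a toolstack knows how many ioreq servers it will run, the
new parameter would be filled in along these lines (a minimal sketch only;
nr_servers and the surrounding error handling are hypothetical and not part of
this patch):

    /* Each ioreq server needs two shared pages (ioreq + buffered ioreq),
     * so request nr_servers * 2 additional special pages. */
    uint32_t nr_servers = 2;    /* example value, normally taken from the config */
    if ( xc_hvm_build_target_mem(xch, dom, memsize, target,
                                 image, nr_servers * 2) != 0 )
        goto error_out;

The python binding above still passes 0 for now, as flagged by the "Ugly fix"
comment, until it can retrieve the number of servers.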