Message ID | 1305741151-4793-4-git-send-email-stefano.stabellini@eu.citrix.com |
---|---|
State | New |
On 05/18/2011 07:52 PM, stefano.stabellini@eu.citrix.com wrote:
> From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
>
> Introduce qemu_ram_ptr_length, which takes an address and a size as
> parameters rather than just an address.
>
> Refactor cpu_physical_memory_map so that we call qemu_ram_ptr_length
> only once rather than calling qemu_get_ram_ptr once per page.
> This is not only more efficient but also simplifies the logic of
> the function.
> Currently we rely on the fact that all the pages are mapped
> contiguously in qemu's address space: we have a check to make sure that
> the virtual address returned by each qemu_get_ram_ptr call after the
> first is consecutive with the previous one. Now we make this explicit
> by replacing all the calls to qemu_get_ram_ptr with a single call to
> qemu_ram_ptr_length, passing a size argument.

Would the interface at
http://permalink.gmane.org/gmane.comp.emulators.qemu/101475 work for you
alternatively?

Paolo
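For reference, the two interfaces under discussion, with prototypes as they appear in the patch below (the clamping behaviour of the size parameter is implemented in the patch body):

```c
/* Existing interface: one lookup per call, no length information. */
void *qemu_get_ram_ptr(ram_addr_t addr);

/* Proposed interface: map up to *size bytes starting at addr; on return,
 * *size is clamped to the bytes remaining in the containing RAMBlock,
 * so the caller learns how much was actually mapped. */
void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size);
```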
On Wed, 18 May 2011, Paolo Bonzini wrote:
> On 05/18/2011 07:52 PM, stefano.stabellini@eu.citrix.com wrote:
> > From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> >
> > Introduce qemu_ram_ptr_length, which takes an address and a size as
> > parameters rather than just an address.
> >
> > Refactor cpu_physical_memory_map so that we call qemu_ram_ptr_length
> > only once rather than calling qemu_get_ram_ptr once per page.
> > This is not only more efficient but also simplifies the logic of
> > the function.
> > Currently we rely on the fact that all the pages are mapped
> > contiguously in qemu's address space: we have a check to make sure that
> > the virtual address returned by each qemu_get_ram_ptr call after the
> > first is consecutive with the previous one. Now we make this explicit
> > by replacing all the calls to qemu_get_ram_ptr with a single call to
> > qemu_ram_ptr_length, passing a size argument.
>
> Would the interface at
> http://permalink.gmane.org/gmane.comp.emulators.qemu/101475 work for you
> alternatively?

Unfortunately that interface doesn't solve the problem I am trying to
address: cpu_physical_memory_map_fast calls
cpu_physical_memory_map_internal, which still calls qemu_get_ram_ptr in
a loop.
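To make the objection concrete, here is a minimal, self-contained sketch of the per-page pattern in question. `get_page_ptr`, `map_range`, and the flat `fake_ram` backing are hypothetical stand-ins for qemu_get_ram_ptr and the mapping loop in exec.c, not actual QEMU symbols:

```c
#include <stdint.h>

#define PAGE_SIZE 4096

/* Hypothetical stand-in for qemu_get_ram_ptr: returns the host pointer
 * for one guest page (here backed by a flat array for illustration). */
static uint8_t fake_ram[8 * PAGE_SIZE];
static uint8_t *get_page_ptr(uint64_t addr)
{
    return fake_ram + addr;
}

/* Schematic of the pattern being criticized: one lookup per page, plus
 * a check that each page's host address follows the previous one. */
static uint8_t *map_range(uint64_t addr, uint64_t *plen)
{
    uint8_t *ret = NULL;
    uint64_t done = 0, len = *plen;

    while (len > 0) {
        uint64_t l = PAGE_SIZE - (addr & (PAGE_SIZE - 1));
        if (l > len) {
            l = len;
        }
        uint8_t *ptr = get_page_ptr(addr);  /* one call per page */
        if (!done) {
            ret = ptr;                      /* first page anchors the result */
        } else if (ret + done != ptr) {
            break;                          /* host addresses not contiguous */
        }
        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;                           /* how much was actually mapped */
    return ret;
}

int main(void)
{
    uint64_t len = 3 * PAGE_SIZE;
    uint8_t *p = map_range(PAGE_SIZE / 2, &len);
    /* len is still 3 * PAGE_SIZE here: the flat backing is contiguous. */
    return p == fake_ram + PAGE_SIZE / 2 ? 0 : 1;
}
```

With a length-taking interface, the per-page lookup and the caller-side contiguity check collapse into a single call that clamps the length instead.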
diff --git a/cpu-common.h b/cpu-common.h
index 151c32c..085aacb 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -64,6 +64,7 @@ void qemu_ram_free(ram_addr_t addr);
 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
 /* This should only be used for ram local to a device. */
 void *qemu_get_ram_ptr(ram_addr_t addr);
+void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size);
 /* Same but slower, to use for migration, where the order of
  * RAMBlocks must not change. */
 void *qemu_safe_ram_ptr(ram_addr_t addr);
diff --git a/exec.c b/exec.c
index 56bc636..ee9316e 100644
--- a/exec.c
+++ b/exec.c
@@ -3110,6 +3110,31 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
     return NULL;
 }
 
+/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
+ * but takes a size argument */
+void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size)
+{
+    if (xen_mapcache_enabled())
+        return qemu_map_cache(addr, *size, 1);
+    else {
+        RAMBlock *block;
+
+        QLIST_FOREACH(block, &ram_list.blocks, next) {
+            if (addr - block->offset < block->length) {
+                if (addr - block->offset + *size > block->length)
+                    *size = block->length - addr + block->offset;
+                return block->host + (addr - block->offset);
+            }
+        }
+
+        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+        abort();
+
+        *size = 0;
+        return NULL;
+    }
+}
+
 void qemu_put_ram_ptr(void *addr)
 {
     trace_qemu_put_ram_ptr(addr);
@@ -3971,14 +3996,12 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
                               int is_write)
 {
     target_phys_addr_t len = *plen;
-    target_phys_addr_t done = 0;
+    target_phys_addr_t todo = 0;
     int l;
-    uint8_t *ret = NULL;
-    uint8_t *ptr;
     target_phys_addr_t page;
     unsigned long pd;
     PhysPageDesc *p;
-    unsigned long addr1;
+    target_phys_addr_t addr1 = addr;
 
     while (len > 0) {
         page = addr & TARGET_PAGE_MASK;
@@ -3993,7 +4016,7 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
         }
 
         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
-            if (done || bounce.buffer) {
+            if (todo || bounce.buffer) {
                 break;
             }
             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
@@ -4002,23 +4025,17 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
             if (!is_write) {
                 cpu_physical_memory_read(addr, bounce.buffer, l);
             }
-            ptr = bounce.buffer;
-        } else {
-            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
-            ptr = qemu_get_ram_ptr(addr1);
-        }
-        if (!done) {
-            ret = ptr;
-        } else if (ret + done != ptr) {
-            break;
+
+            *plen = l;
+            return bounce.buffer;
         }
 
         len -= l;
         addr += l;
-        done += l;
+        todo += l;
     }
-    *plen = done;
-    return ret;
+    *plen = todo;
+    return qemu_ram_ptr_length(addr1, plen);
 }
 
 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
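As a worked illustration of the semantics above, the following self-contained model reproduces the lookup-and-clamp logic of qemu_ram_ptr_length. `Block` and `ptr_length` are simplified stand-ins for RAMBlock and the patched function, with the block list reduced to an array and the Xen map-cache path omitted:

```c
#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for RAMBlock. */
typedef struct {
    uint64_t offset;   /* guest ram_addr_t where the block starts */
    uint64_t length;   /* block size in bytes */
    uint8_t *host;     /* host mapping of the block */
} Block;

/* Same lookup-and-clamp logic as qemu_ram_ptr_length in the patch:
 * find the block containing addr, clamp *size to the bytes remaining
 * in that block, and return the corresponding host pointer. */
static void *ptr_length(Block *blocks, int nblocks,
                        uint64_t addr, uint64_t *size)
{
    for (int i = 0; i < nblocks; i++) {
        Block *b = &blocks[i];
        if (addr - b->offset < b->length) {
            if (addr - b->offset + *size > b->length) {
                *size = b->length - (addr - b->offset);
            }
            return b->host + (addr - b->offset);
        }
    }
    *size = 0;
    return NULL;
}

int main(void)
{
    static uint8_t ram[4096];
    Block blocks[] = { { 0x1000, sizeof(ram), ram } };

    /* Ask for more bytes than remain in the block: *size gets clamped. */
    uint64_t size = 8192;
    void *p = ptr_length(blocks, 1, 0x1800, &size);
    printf("ptr=%p size=%llu\n", p, (unsigned long long)size); /* size == 2048 */
    return p != NULL ? 0 : 1;
}
```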