[1/2] memory: Provide separate handling of unassigned io ports accesses

Message ID: 51FCBFCD.5020608@web.de
State: New

Commit Message

Jan Kiszka Aug. 3, 2013, 8:31 a.m. UTC
From: Jan Kiszka <jan.kiszka@siemens.com>

Accesses to unassigned io ports shall return -1 on read and be ignored
on write. Ensure these properties via dedicated ops, decoupling us from
the memory core's handling of unassigned accesses.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
 exec.c                |    3 ++-
 include/exec/ioport.h |    2 ++
 ioport.c              |   16 ++++++++++++++++
 3 files changed, 20 insertions(+), 1 deletions(-)

Comments

Andreas Färber Aug. 5, 2013, 9:34 a.m. UTC | #1
Am 03.08.2013 10:31, schrieb Jan Kiszka:
> From: Jan Kiszka <jan.kiszka@siemens.com>
> 
> Accesses to unassigned io ports shall return -1 on read and be ignored
> on write. Ensure these properties via dedicated ops, decoupling us from
> the memory core's handling of unassigned accesses.
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> ---
>  exec.c                |    3 ++-
>  include/exec/ioport.h |    2 ++
>  ioport.c              |   16 ++++++++++++++++
>  3 files changed, 20 insertions(+), 1 deletions(-)
> 
> diff --git a/exec.c b/exec.c
> index 3ca9381..9ed598f 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -1820,7 +1820,8 @@ static void memory_map_init(void)
>      address_space_init(&address_space_memory, system_memory, "memory");
>  
>      system_io = g_malloc(sizeof(*system_io));
> -    memory_region_init(system_io, NULL, "io", 65536);
> +    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
> +                          65536);

It was reported that there may be some machines/PHBs that have
overlapping PIO/MMIO. Unless we use priorities, this ..._io MemoryRegion
will shadow or conflict with any ..._io MemoryRegion added to the memory
address space, wouldn't it?

Regards,
Andreas

>      address_space_init(&address_space_io, system_io, "I/O");
>  
>      memory_listener_register(&core_memory_listener, &address_space_memory);
> diff --git a/include/exec/ioport.h b/include/exec/ioport.h
> index bdd4e96..84f7f85 100644
> --- a/include/exec/ioport.h
> +++ b/include/exec/ioport.h
> @@ -45,6 +45,8 @@ typedef struct MemoryRegionPortio {
>  
>  #define PORTIO_END_OF_LIST() { }
>  
> +extern const MemoryRegionOps unassigned_io_ops;
> +
>  void cpu_outb(pio_addr_t addr, uint8_t val);
>  void cpu_outw(pio_addr_t addr, uint16_t val);
>  void cpu_outl(pio_addr_t addr, uint32_t val);
> diff --git a/ioport.c b/ioport.c
> index 79b7f1a..9765588 100644
> --- a/ioport.c
> +++ b/ioport.c
> @@ -44,6 +44,22 @@ typedef struct MemoryRegionPortioList {
>      MemoryRegionPortio ports[];
>  } MemoryRegionPortioList;
>  
> +static uint64_t unassigned_io_read(void *opaque, hwaddr addr, unsigned size)
> +{
> +    return -1UL;
> +}
> +
> +static void unassigned_io_write(void *opaque, hwaddr addr, uint64_t val,
> +                                unsigned size)
> +{
> +}
> +
> +const MemoryRegionOps unassigned_io_ops = {
> +    .read = unassigned_io_read,
> +    .write = unassigned_io_write,
> +    .endianness = DEVICE_NATIVE_ENDIAN,
> +};
> +
>  void cpu_outb(pio_addr_t addr, uint8_t val)
>  {
>      LOG_IOPORT("outb: %04"FMT_pioaddr" %02"PRIx8"\n", addr, val);
>
Jan Kiszka Aug. 5, 2013, 9:38 a.m. UTC | #2
On 2013-08-05 11:34, Andreas Färber wrote:
> Am 03.08.2013 10:31, schrieb Jan Kiszka:
>> From: Jan Kiszka <jan.kiszka@siemens.com>
>>
>> Accesses to unassigned io ports shall return -1 on read and be ignored
>> on write. Ensure these properties via dedicated ops, decoupling us from
>> the memory core's handling of unassigned accesses.
>>
>> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
>> ---
>>  exec.c                |    3 ++-
>>  include/exec/ioport.h |    2 ++
>>  ioport.c              |   16 ++++++++++++++++
>>  3 files changed, 20 insertions(+), 1 deletions(-)
>>
>> diff --git a/exec.c b/exec.c
>> index 3ca9381..9ed598f 100644
>> --- a/exec.c
>> +++ b/exec.c
>> @@ -1820,7 +1820,8 @@ static void memory_map_init(void)
>>      address_space_init(&address_space_memory, system_memory, "memory");
>>  
>>      system_io = g_malloc(sizeof(*system_io));
>> -    memory_region_init(system_io, NULL, "io", 65536);
>> +    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
>> +                          65536);
> 
> It was reported that there may be some machines/PHBs that have
> overlapping PIO/MMIO. Unless we use priorities, this ..._io MemoryRegion
> will shadow or conflict with any ..._io MemoryRegion added to the memory
> address space, wouldn't it?

I don't follow yet. This is the base mem-region for the PIO space. If
some region from the MMIO space forwards requests here and there is no
backing device, this base region will handle them. So even on archs with
MMIO-based PIO, both address spaces should remain separate. Or am I
missing something?

Jan
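
For readers unfamiliar with the arrangement under discussion: on machines
with MMIO-based PIO, the PIO space is typically made visible by mapping an
alias of it into the system memory region. A minimal sketch against the
QEMU memory API (the base address and names are illustrative, not taken
from any board):

#include "exec/memory.h"

/* Map the 64 KiB PIO space into system memory as an MMIO window. */
static void map_pio_window(MemoryRegion *sysmem, MemoryRegion *sysio)
{
    MemoryRegion *alias = g_new(MemoryRegion, 1);

    memory_region_init_alias(alias, NULL, "pio-window", sysio, 0, 65536);
    /* the base address below is purely illustrative */
    memory_region_add_subregion(sysmem, 0x80000000, alias);
}
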
Peter Maydell Aug. 5, 2013, 9:59 a.m. UTC | #3
On 5 August 2013 10:34, Andreas Färber <afaerber@suse.de> wrote:
> Am 03.08.2013 10:31, schrieb Jan Kiszka:
>> From: Jan Kiszka <jan.kiszka@siemens.com>
>>
>> Accesses to unassigned io ports shall return -1 on read and be ignored
>> on write. Ensure these properties via dedicated ops, decoupling us from
>> the memory core's handling of unassigned accesses.
>>
>> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
>> ---
>>  exec.c                |    3 ++-
>>  include/exec/ioport.h |    2 ++
>>  ioport.c              |   16 ++++++++++++++++
>>  3 files changed, 20 insertions(+), 1 deletions(-)
>>
>> diff --git a/exec.c b/exec.c
>> index 3ca9381..9ed598f 100644
>> --- a/exec.c
>> +++ b/exec.c
>> @@ -1820,7 +1820,8 @@ static void memory_map_init(void)
>>      address_space_init(&address_space_memory, system_memory, "memory");
>>
>>      system_io = g_malloc(sizeof(*system_io));
>> -    memory_region_init(system_io, NULL, "io", 65536);
>> +    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
>> +                          65536);
>
> It was reported that there may be some machines/PHBs that have
> overlapping PIO/MMIO. Unless we use priorities, this ..._io MemoryRegion
> will shadow or conflict with any ..._io MemoryRegion added to the memory
> address space, wouldn't it?

Priorities only apply between different subregions within a
container. This is adding IO operations to the container itself,
so there's no priority issue here: the container's operations
always come last, behind any subregions it has.

(Do we have any existing examples of container regions with their
own default IO operations? The memory.c code clearly expects them
to be OK, though - eg render_memory_region() specifically does
"render subregions; then render the region itself into any gaps".)

Or do you mean that if we had:

 [ system memory region, with its own default read/write ops ]
     [ io region mapped into it ]
         [ io ]   [ io ][io]

that now if you access the bit of system memory corresponding
to the I/O region at some address with no specific IO port,
you'll get the IO region's defaults, rather than the system
memory region's defaults? I think that's true and possibly
a change in behaviour. Do we have any boards that do that?

-- PMM
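
To illustrate the ordering Peter describes, a minimal sketch against the
QEMU memory API (the UART offset, names, and helper function are
illustrative, not taken from the patch): a container created with its own
ops answers only the accesses that no subregion claims.

#include "exec/memory.h"

static uint64_t dflt_read(void *opaque, hwaddr addr, unsigned size)
{
    return -1ULL;               /* reads into a gap return all-ones */
}

static void dflt_write(void *opaque, hwaddr addr, uint64_t val,
                       unsigned size)
{
    /* writes into a gap are silently ignored */
}

static const MemoryRegionOps dflt_ops = {
    .read = dflt_read,
    .write = dflt_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void init_pio_container(MemoryRegion *pio, MemoryRegion *uart_mr)
{
    /* the container has its own ops ... */
    memory_region_init_io(pio, NULL, &dflt_ops, NULL, "pio", 65536);
    /* ... but a subregion always takes precedence over them */
    memory_region_add_subregion(pio, 0x3f8, uart_mr);
}
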
Andreas Färber Aug. 5, 2013, 10:29 a.m. UTC | #4
Am 05.08.2013 11:59, schrieb Peter Maydell:
> On 5 August 2013 10:34, Andreas Färber <afaerber@suse.de> wrote:
>> Am 03.08.2013 10:31, schrieb Jan Kiszka:
>>> From: Jan Kiszka <jan.kiszka@siemens.com>
>>>
>>> Accesses to unassigned io ports shall return -1 on read and be ignored
>>> on write. Ensure these properties via dedicated ops, decoupling us from
>>> the memory core's handling of unassigned accesses.
>>>
>>> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
>>> ---
>>>  exec.c                |    3 ++-
>>>  include/exec/ioport.h |    2 ++
>>>  ioport.c              |   16 ++++++++++++++++
>>>  3 files changed, 20 insertions(+), 1 deletions(-)
>>>
>>> diff --git a/exec.c b/exec.c
>>> index 3ca9381..9ed598f 100644
>>> --- a/exec.c
>>> +++ b/exec.c
>>> @@ -1820,7 +1820,8 @@ static void memory_map_init(void)
>>>      address_space_init(&address_space_memory, system_memory, "memory");
>>>
>>>      system_io = g_malloc(sizeof(*system_io));
>>> -    memory_region_init(system_io, NULL, "io", 65536);
>>> +    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
>>> +                          65536);
>>
>> It was reported that there may be some machines/PHBs that have
>> overlapping PIO/MMIO. Unless we use priorities, this ..._io MemoryRegion
>> will shadow or conflict with any ..._io MemoryRegion added to the memory
>> address space, wouldn't it?
> 
> Priorities only apply between different subregions within a
> container. This is adding IO operations to the container itself,
> so there's no priority issue here: the container's operations
> always come last, behind any subregions it has.
> 
> (Do we have any existing examples of container regions with their
> own default IO operations? The memory.c code clearly expects them
> to be OK, though - eg render_memory_region() specifically does
> "render subregions; then render the region itself into any gaps".)
> 
> Or do you mean that if we had:
> 
>  [ system memory region, with its own default read/write ops ]
>      [ io region mapped into it ]
>          [ io ]   [ io ][io]
> 
> that now if you access the bit of system memory corresponding
> to the I/O region at some address with no specific IO port,
> you'll get the IO region's defaults, rather than the system
> memory region's defaults? I think that's true and possibly
> a change in behaviour.

Yes, that's what I thought someone brought up in one of those lengthy
memory discussions: Accesses between the first two [io]s would now seem
to go to [io region] rather than into [system memory region] subregions
at the same level as [io region].

> Do we have any boards that do that?

Sorry, I don't remember who brought it up, hopefully Paolo remembers?
Possibly sPAPR?

Andreas
Jan Kiszka Aug. 5, 2013, 10:30 a.m. UTC | #5
On 2013-08-05 11:59, Peter Maydell wrote:
> On 5 August 2013 10:34, Andreas Färber <afaerber@suse.de> wrote:
>> Am 03.08.2013 10:31, schrieb Jan Kiszka:
>>> From: Jan Kiszka <jan.kiszka@siemens.com>
>>>
>>> Accesses to unassigned io ports shall return -1 on read and be ignored
>>> on write. Ensure these properties via dedicated ops, decoupling us from
>>> the memory core's handling of unassigned accesses.
>>>
>>> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
>>> ---
>>>  exec.c                |    3 ++-
>>>  include/exec/ioport.h |    2 ++
>>>  ioport.c              |   16 ++++++++++++++++
>>>  3 files changed, 20 insertions(+), 1 deletions(-)
>>>
>>> diff --git a/exec.c b/exec.c
>>> index 3ca9381..9ed598f 100644
>>> --- a/exec.c
>>> +++ b/exec.c
>>> @@ -1820,7 +1820,8 @@ static void memory_map_init(void)
>>>      address_space_init(&address_space_memory, system_memory, "memory");
>>>
>>>      system_io = g_malloc(sizeof(*system_io));
>>> -    memory_region_init(system_io, NULL, "io", 65536);
>>> +    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
>>> +                          65536);
>>
>> It was reported that there may be some machines/PHBs that have
>> overlapping PIO/MMIO. Unless we use priorities, this ..._io MemoryRegion
>> will shadow or conflict with any ..._io MemoryRegion added to the memory
>> address space, wouldn't it?
> 
> Priorities only apply between different subregions within a
> container. This is adding IO operations to the container itself,
> so there's no priority issue here: the container's operations
> always come last, behind any subregions it has.
> 
> (Do we have any existing examples of container regions with their
> own default IO operations? The memory.c code clearly expects them
> to be OK, though - eg render_memory_region() specifically does
> "render subregions; then render the region itself into any gaps".)
> 
> Or do you mean that if we had:
> 
>  [ system memory region, with its own default read/write ops ]

I cannot imagine how this could work. The system memory region has no
clue about what the regions below it can handle and what not. So it has
to pass through the io window.

Jan

>      [ io region mapped into it ]
>          [ io ]   [ io ][io]
> 
> that now if you access the bit of system memory corresponding
> to the I/O region at some address with no specific IO port,
> you'll get the IO region's defaults, rather than the system
> memory region's defaults? I think that's true and possibly
> a change in behaviour. Do we have any boards that do that?
> 
> -- PMM
>
Peter Maydell Aug. 5, 2013, 10:36 a.m. UTC | #6
On 5 August 2013 11:30, Jan Kiszka <jan.kiszka@web.de> wrote:
> On 2013-08-05 11:59, Peter Maydell wrote:
>> Or do you mean that if we had:
>>
>>  [ system memory region, with its own default read/write ops ]
>
> I cannot imagine how this could work. The system memory region has no
> clue about what the regions below it can handle and what not. So it has
> to pass through the io window.

The system memory region's just a container, you can add a
background region to it at lowest-possible-priority, which
then takes accesses which are either (a) not in any subregion
or (b) in a subregion but that container doesn't specify
its own io ops and nothing in that container handles the
access. (Or you could create the system memory region with
its own IO ops, which would have the same effect.)

This works because the memory subsystem flattens the whole
tree, and at flattening time render_memory_region() effectively
knows if there are gaps in the subcontainer or not. At runtime
we never "pass through" anything (except for the special case
of an IOMMU) -- the tree of regions has been preflattened
and the target of the memory access is known in advance.

-- PMM
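
A sketch of the background-region variant Peter mentions, assuming the
signed priorities of the current memory API and reusing dflt_ops from the
earlier sketch:

static void add_background(MemoryRegion *container, uint64_t size)
{
    MemoryRegion *bg = g_new(MemoryRegion, 1);

    memory_region_init_io(bg, NULL, &dflt_ops, NULL, "background", size);
    /* priority -1 keeps the background behind every default-priority
     * (0) subregion; render_memory_region() uses it only for the gaps */
    memory_region_add_subregion_overlap(container, 0, bg, -1);
}
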
Jan Kiszka Aug. 5, 2013, 10:44 a.m. UTC | #7
On 2013-08-05 12:36, Peter Maydell wrote:
> On 5 August 2013 11:30, Jan Kiszka <jan.kiszka@web.de> wrote:
>> On 2013-08-05 11:59, Peter Maydell wrote:
>>> Or do you mean that if we had:
>>>
>>>  [ system memory region, with its own default read/write ops ]
>>
>> I cannot imagine how this could work. The system memory region has no
>> clue about what the regions below it can handle and what not. So it has
>> to pass through the io window.
> 
> The system memory region's just a container, you can add a
> background region to it at lowest-possible-priority, which
> then takes accesses which are either (a) not in any subregion
> or (b) in a subregion but that container doesn't specify
> its own io ops and nothing in that container handles the
> access. (Or you could create the system memory region with
> its own IO ops, which would have the same effect.)

First, we do not render MMIO and IO within the same address space so
far. But even if we would, the IO container now catches all accesses, so
the system memory region will never have its default handler run for
that window.

Jan
Peter Maydell Aug. 5, 2013, 10:51 a.m. UTC | #8
On 5 August 2013 11:44, Jan Kiszka <jan.kiszka@web.de> wrote:
> On 2013-08-05 12:36, Peter Maydell wrote:
>> On 5 August 2013 11:30, Jan Kiszka <jan.kiszka@web.de> wrote:
>>> On 2013-08-05 11:59, Peter Maydell wrote:
>>>> Or do you mean that if we had:
>>>>
>>>>  [ system memory region, with its own default read/write ops ]
>>>
>>> I cannot imagine how this could work. The system memory region has no
>>> clue about what the regions below it can handle and what not. So it has
>>> to pass through the io window.
>>
>> The system memory region's just a container, you can add a
>> background region to it at lowest-possible-priority, which
>> then takes accesses which are either (a) not in any subregion
>> or (b) in a subregion but that container doesn't specify
>> its own io ops and nothing in that container handles the
>> access. (Or you could create the system memory region with
>> its own IO ops, which would have the same effect.)
>
> First, we do not render MMIO and IO within the same address space so
> far.

Is this a statement made because you've checked all the boards
and know that nobody's mapping the system-io memory region into
the system-memory region? (It is technically trivial, you
just need to call memory_region_add_subregion() directly
or indirectly...)

> But even if we would, the IO container now catches all accesses, so
> the system memory region will never have its default handler run for
> that window.

Yes, that is the point -- before the IO container caught all
accesses, the default handler that would run would be the
system memory region, but now that the IO container has
default read/write ops they will take precedence. So this
would be a behaviour change if there are any boards set up
like this. (I'm not sure there are any, though.)

-- PMM
Jan Kiszka Aug. 5, 2013, 11:03 a.m. UTC | #9
On 2013-08-05 12:51, Peter Maydell wrote:
> On 5 August 2013 11:44, Jan Kiszka <jan.kiszka@web.de> wrote:
>> On 2013-08-05 12:36, Peter Maydell wrote:
>>> On 5 August 2013 11:30, Jan Kiszka <jan.kiszka@web.de> wrote:
>>>> On 2013-08-05 11:59, Peter Maydell wrote:
>>>>> Or do you mean that if we had:
>>>>>
>>>>>  [ system memory region, with its own default read/write ops ]
>>>>
>>>> I cannot imagine how this could work. The system memory region has no
>>>> clue about what the regions below it can handle and what not. So it has
>>>> to pass through the io window.
>>>
>>> The system memory region's just a container, you can add a
>>> background region to it at lowest-possible-priority, which
>>> then takes accesses which are either (a) not in any subregion
>>> or (b) in a subregion but that container doesn't specify
>>> its own io ops and nothing in that container handles the
>>> access. (Or you could create the system memory region with
>>> its own IO ops, which would have the same effect.)
>>
>> First, we do not render MMIO and IO within the same address space so
>> far.
> 
> Is this a statement made because you've checked all the boards
> and know that nobody's mapping the system-io memory region into
> the system-memory region? (It is technically trivial, you
> just need to call memory_region_add_subregion() directly
> or indirectly...)

I know this because I just recently wrote the patch that enables this
trivial step, i.e. converted PIO dispatching to the memory subsystem.

Jan
Paolo Bonzini Aug. 5, 2013, 11:07 a.m. UTC | #10
On Aug 05 2013, Jan Kiszka wrote:
> On 2013-08-05 12:36, Peter Maydell wrote:
> > On 5 August 2013 11:30, Jan Kiszka <jan.kiszka@web.de> wrote:
> >> On 2013-08-05 11:59, Peter Maydell wrote:
> >>> Or do you mean that if we had:
> >>>
> >>>  [ system memory region, with its own default read/write ops ]
> >>
> >> I cannot imagine how this could work. The system memory region has no
> >> clue about what the regions below it can handle and what not. So it has
> >> to pass through the io window.
> > 
> > The system memory region's just a container, you can add a
> > background region to it at lowest-possible-priority, which
> > then takes accesses which are either (a) not in any subregion
> > or (b) in a subregion but that container doesn't specify
> > its own io ops and nothing in that container handles the
> > access. (Or you could create the system memory region with
> > its own IO ops, which would have the same effect.)
> 
> First, we do not render MMIO and IO within the same address space so
> far.

This is not true, we do render the I/O address space in system memory
on machines that support PCI or ISA but are not x86.  We do that through
an alias of get_system_io(), but that doesn't change the picture.

> But even if we would, the IO container now catches all accesses, so
> the system memory region will never have its default handler run for
> that window.

This however is true, and is the right thing to do.  It is also
very similar to the workaround that Richard did on Alpha (commit
3661049fec64ffd7ab008e57e396881c6a4b53a4) and it should be possible
to revert that one indeed.

Paolo
Paolo Bonzini Aug. 5, 2013, 11:10 a.m. UTC | #11
On Aug 05 2013, Peter Maydell wrote:
> > But even if we would, the IO container now catches all accesses, so
> > the system memory region will never have its default handler run for
> > that window.
> 
> Yes, that is the point -- before the IO container caught all
> accesses, the default handler that would run would be the
> system memory region, but now that the IO container has
> default read/write ops they will take precedence. So this
> would be a behaviour change if there are any boards set up
> like this. (I'm not sure there are any, though.)

Before 1.6 this would have happened anyway.  Before 1.6, all
I/O accesses would go through cpu_in/cpu_out, and would never
hit the system memory region.  So Jan's patch is effectively
restoring old behavior.

Paolo
Andreas Färber Aug. 5, 2013, 11:35 a.m. UTC | #12
Am 05.08.2013 13:03, schrieb Jan Kiszka:
> On 2013-08-05 12:51, Peter Maydell wrote:
>> On 5 August 2013 11:44, Jan Kiszka <jan.kiszka@web.de> wrote:
>>> On 2013-08-05 12:36, Peter Maydell wrote:
>>>> On 5 August 2013 11:30, Jan Kiszka <jan.kiszka@web.de>
>>>> wrote:
>>>>> On 2013-08-05 11:59, Peter Maydell wrote:
>>>>>> Or do you mean that if we had:
>>>>>> 
>>>>>> [ system memory region, with its own default read/write
>>>>>> ops ]
>>>>> 
>>>>> I cannot imagine how this could work. The system memory
>>>>> region has no clue about what the regions below it can
>>>>> handle and what not. So it has to pass through the io
>>>>> window.
>>>> 
>>>> The system memory region's just a container, you can add a 
>>>> background region to it at lowest-possible-priority, which 
>>>> then takes accesses which are either (a) not in any
>>>> subregion or (b) in a subregion but that container doesn't
>>>> specify its own io ops and nothing in that container handles
>>>> the access. (Or you could create the system memory region
>>>> with its own IO ops, which would have the same effect.)
>>> 
>>> First, we do not render MMIO and IO within the same address
>>> space so far.
>> 
>> Is this a statement made because you've checked all the boards 
>> and know that nobody's mapping the system-io memory region into 
>> the system-memory region? (It is technically trivial, you just
>> need to call memory_region_add_subregion() directly or
>> indirectly...)
> 
> I know this because I just recently wrote the patch that enables
> this trivial step, i.e. converted PIO dispatching to the memory
> subsystem.

Several patches have been applied since, e.g.

sPAPR PHB:
http://git.qemu.org/?p=qemu.git;a=commit;h=66aab867cedd2a2d81b4d64eff7c3e0f6f272bbf
-> aliases system_io()

PReP i82378 PCI-ISA bridge:
http://git.qemu.org/?p=qemu.git;a=commit;h=5c9736789b79ea49cd236ac326f0a414f63b1015
-> uses pci_address_space_io()

Alpha Typhoon:
http://git.qemu.org/?p=qemu.git;a=commit;h=056e6bae1c91f47165d962564f82f5176bae47f0
http://git.qemu.org/?p=qemu.git;a=commit;h=3661049fec64ffd7ab008e57e396881c6a4b53a4

[For those joining late, this discussion is about whether making PIO
MemoryRegion ..._io rather than just container might hurt some use
case. If you have a concrete test case that would be appreciated; a
we-don't-care-about-such-a-fringe-case would help as well.]

Andreas

Jan Kiszka Aug. 5, 2013, 11:38 a.m. UTC | #13
On 2013-08-05 13:35, Andreas Färber wrote:
> Am 05.08.2013 13:03, schrieb Jan Kiszka:
>> On 2013-08-05 12:51, Peter Maydell wrote:
>>> On 5 August 2013 11:44, Jan Kiszka <jan.kiszka@web.de> wrote:
>>>> On 2013-08-05 12:36, Peter Maydell wrote:
>>>>> On 5 August 2013 11:30, Jan Kiszka <jan.kiszka@web.de>
>>>>> wrote:
>>>>>> On 2013-08-05 11:59, Peter Maydell wrote:
>>>>>>> Or do you mean that if we had:
>>>>>>>
>>>>>>> [ system memory region, with its own default read/write
>>>>>>> ops ]
>>>>>>
>>>>>> I cannot imagine how this could work. The system memory
>>>>>> region has no clue about what the regions below it can
>>>>>> handle and what not. So it has to pass through the io
>>>>>> window.
>>>>>
>>>>> The system memory region's just a container, you can add a 
>>>>> background region to it at lowest-possible-priority, which 
>>>>> then takes accesses which are either (a) not in any
>>>>> subregion or (b) in a subregion but that container doesn't
>>>>> specify its own io ops and nothing in that container handles
>>>>> the access. (Or you could create the system memory region
>>>>> with its own IO ops, which would have the same effect.)
>>>>
>>>> First, we do not render MMIO and IO within the same address
>>>> space so far.
>>>
>>> Is this a statement made because you've checked all the boards 
>>> and know that nobody's mapping the system-io memory region into 
>>> the system-memory region? (It is technically trivial, you just
>>> need to call memory_region_add_subregion() directly or
>>> indirectly...)
> 
>> I know this because I just recently wrote the patch that enables
>> this trivial step, i.e. converted PIO dispatching to the memory
>> subsystem.
> 
> Several patches have been applied since, e.g.
> 
> sPAPR PHB:
> http://git.qemu.org/?p=qemu.git;a=commit;h=66aab867cedd2a2d81b4d64eff7c3e0f6f272bbf
> -> aliases system_io()
> 
> PReP i82378 PCI-ISA bridge:
> http://git.qemu.org/?p=qemu.git;a=commit;h=5c9736789b79ea49cd236ac326f0a414f63b1015
> -> uses pci_address_space_io()
> 
> Alpha Typhoon:
> http://git.qemu.org/?p=qemu.git;a=commit;h=056e6bae1c91f47165d962564f82f5176bae47f0
> http://git.qemu.org/?p=qemu.git;a=commit;h=3661049fec64ffd7ab008e57e396881c6a4b53a4
> 
> [For those joining late, this discussion is about whether making PIO
> MemoryRegion ..._io rather than just container might hurt some use
> case. If you have a concrete test case that would be appreciated; a
> we-don't-care-about-such-a-fringe-case would help as well.]

OK, one assumption became outdated, but the other will remain true once
the patch is applied. So let's close this discussion.

Jan
Richard Henderson Aug. 5, 2013, 8:19 p.m. UTC | #14
On 08/04/2013 11:59 PM, Peter Maydell wrote:
> (Do we have any existing examples of container regions with their
> own default IO operations? The memory.c code clearly expects them
> to be OK, though - eg render_memory_region() specifically does
> "render subregions; then render the region itself into any gaps".)

See the Alpha pci_io container, which I defaulted to not throw exceptions
because we don't emulate all of the devices on the SuperIO chip.  Exactly
the problem we're talking about in this thread, no?


r~
Peter Maydell Aug. 8, 2013, 3:33 p.m. UTC | #15
On 3 August 2013 09:31, Jan Kiszka <jan.kiszka@web.de> wrote:
> --- a/ioport.c
> +++ b/ioport.c
> @@ -44,6 +44,22 @@ typedef struct MemoryRegionPortioList {
>      MemoryRegionPortio ports[];
>  } MemoryRegionPortioList;
>
> +static uint64_t unassigned_io_read(void *opaque, hwaddr addr, unsigned size)
> +{
> +    return -1UL;

This should probably be "-1ULL", otherwise we'll return
different values on 32 bit and 64 bit hosts. (Actually
managing a 64 bit read of the i/o space is pretty
unlikely, though possibly alpha memory-mapped via the
PCI space might let you do it.)

PS: something about the way these patches were submitted
has confused Anthony's patches tool -- it reports them
as two separate patches rather than a single series.
(No cover letter, maybe?)

thanks
-- PMM
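
A small self-contained demonstration of the difference, independent of
QEMU:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* where unsigned long is 32 bits wide (ILP32, LLP64), -1UL is
     * 0xffffffff and gets zero-extended on conversion to uint64_t */
    uint64_t from_ul  = -1UL;   /* 0x00000000ffffffff on such hosts */
    uint64_t from_ull = -1ULL;  /* 0xffffffffffffffff on any host */

    printf("-1UL  -> %016" PRIx64 "\n", from_ul);
    printf("-1ULL -> %016" PRIx64 "\n", from_ull);
    return 0;
}
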
Jan Kiszka Aug. 8, 2013, 3:43 p.m. UTC | #16
On 2013-08-08 17:33, Peter Maydell wrote:
> On 3 August 2013 09:31, Jan Kiszka <jan.kiszka@web.de> wrote:
>> --- a/ioport.c
>> +++ b/ioport.c
>> @@ -44,6 +44,22 @@ typedef struct MemoryRegionPortioList {
>>      MemoryRegionPortio ports[];
>>  } MemoryRegionPortioList;
>>
>> +static uint64_t unassigned_io_read(void *opaque, hwaddr addr, unsigned size)
>> +{
>> +    return -1UL;
> 
> This should probably be "-1ULL", otherwise we'll return
> different values on 32 bit and 64 bit hosts. (Actually
> managing a 64 bit read of the i/o space is pretty
> unlikely, though possibly alpha memory-mapped via the
> PCI space might let you do it.)

No problem with changing this - but wouldn't 64-bit i/o accesses be a
bug? It's not allowed according to PCI, no device can handle it
(officially), so no arch should forward such requests from mmio, rather
break them up first.

> 
> PS: something about the way these patches were submitted
> has confused Anthony's patches tool -- it reports them
> as two separate patches rather than a single series.
> (No cover letter, maybe?)

Something on my side broke the reference from the second to the first email.

Jan
Paolo Bonzini Aug. 9, 2013, 7:41 a.m. UTC | #17
Il 08/08/2013 17:43, Jan Kiszka ha scritto:
> On 2013-08-08 17:33, Peter Maydell wrote:
>> On 3 August 2013 09:31, Jan Kiszka <jan.kiszka@web.de> wrote:
>>> --- a/ioport.c
>>> +++ b/ioport.c
>>> @@ -44,6 +44,22 @@ typedef struct MemoryRegionPortioList {
>>>      MemoryRegionPortio ports[];
>>>  } MemoryRegionPortioList;
>>>
>>> +static uint64_t unassigned_io_read(void *opaque, hwaddr addr, unsigned size)
>>> +{
>>> +    return -1UL;
>>
>> This should probably be "-1ULL", otherwise we'll return
>> different values on 32 bit and 64 bit hosts. (Actually
>> managing a 64 bit read of the i/o space is pretty
>> unlikely, though possibly alpha memory-mapped via the
>> PCI space might let you do it.)
> 
> No problem with changing this - but wouldn't 64-bit i/o accesses be a
> bug? It's not allowed according to PCI, no device can handle it
> (officially), so no arch should forward such requests from mmio, rather
> break them up first.

Yes, the impl.max_access_size should never be 8.  Though -1ULL would be
clearer perhaps.

Paolo
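
To make that concrete, a sketch of ops whose callbacks can never see an
8-byte access (the .impl settings are an assumption for illustration, not
part of the patch): capping impl.max_access_size at 4 makes the memory
core split a quadword access into two 32-bit callbacks.

static const MemoryRegionOps capped_io_ops = {
    .read = unassigned_io_read,
    .write = unassigned_io_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,   /* an 8-byte access is broken up into
                                   two 4-byte callbacks by the core */
    },
};
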
Andreas Färber Aug. 9, 2013, 4:49 p.m. UTC | #18
Am 09.08.2013 09:41, schrieb Paolo Bonzini:
> Il 08/08/2013 17:43, Jan Kiszka ha scritto:
>> On 2013-08-08 17:33, Peter Maydell wrote:
>>> On 3 August 2013 09:31, Jan Kiszka <jan.kiszka@web.de> wrote:
>>>> --- a/ioport.c
>>>> +++ b/ioport.c
>>>> @@ -44,6 +44,22 @@ typedef struct MemoryRegionPortioList {
>>>>      MemoryRegionPortio ports[];
>>>>  } MemoryRegionPortioList;
>>>>
>>>> +static uint64_t unassigned_io_read(void *opaque, hwaddr addr, unsigned size)
>>>> +{
>>>> +    return -1UL;
>>>
>>> This should probably be "-1ULL", otherwise we'll return
>>> different values on 32 bit and 64 bit hosts. (Actually
>>> managing a 64 bit read of the i/o space is pretty
>>> unlikely, though possibly alpha memory-mapped via the
>>> PCI space might let you do it.)
>>
>> No problem with changing this - but wouldn't 64-bit i/o accesses be a
>> bug? It's not allowed according to PCI, no device can handle it
>> (officially), so no arch should forward such requests from mmio, rather
>> break them up first.
> 
> Yes, the impl.max_access_size should never be 8.  Though 1ULL would be
> clearer perhaps.

Let's CC rth.

Andreas
Richard Henderson Aug. 9, 2013, 6:48 p.m. UTC | #19
On 08/09/2013 06:49 AM, Andreas Färber wrote:
> Am 09.08.2013 09:41, schrieb Paolo Bonzini:
>> Il 08/08/2013 17:43, Jan Kiszka ha scritto:
>>> On 2013-08-08 17:33, Peter Maydell wrote:
>>>> On 3 August 2013 09:31, Jan Kiszka <jan.kiszka@web.de> wrote:
>>>>> --- a/ioport.c
>>>>> +++ b/ioport.c
>>>>> @@ -44,6 +44,22 @@ typedef struct MemoryRegionPortioList {
>>>>>      MemoryRegionPortio ports[];
>>>>>  } MemoryRegionPortioList;
>>>>>
>>>>> +static uint64_t unassigned_io_read(void *opaque, hwaddr addr, unsigned size)
>>>>> +{
>>>>> +    return -1UL;
>>>>
>>>> This should probably be "-1ULL", otherwise we'll return
>>>> different values on 32 bit and 64 bit hosts. (Actually
>>>> managing a 64 bit read of the i/o space is pretty
>>>> unlikely, though possibly alpha memory-mapped via the
>>>> PCI space might let you do it.)
>>>
>>> No problem with changing this - but wouldn't 64-bit i/o accesses be a
>>> bug? It's not allowed according to PCI, no device can handle it
>>> (officially), so no arch should forward such requests from mmio, rather
>>> break them up first.
>>
>> Yes, the impl.max_access_size should never be 8.  Though 1ULL would be
>> clearer perhaps.
> 
> Let's CC rth.

At least the CIA and TYPHOON PCI host controllers can generate quadword accesses
to all of MEM, IO, and config space.  Whether or not such an access is valid
for any given device is a separate issue.


r~
Patch

diff --git a/exec.c b/exec.c
index 3ca9381..9ed598f 100644
--- a/exec.c
+++ b/exec.c
@@ -1820,7 +1820,8 @@ static void memory_map_init(void)
     address_space_init(&address_space_memory, system_memory, "memory");
 
     system_io = g_malloc(sizeof(*system_io));
-    memory_region_init(system_io, NULL, "io", 65536);
+    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
+                          65536);
     address_space_init(&address_space_io, system_io, "I/O");
 
     memory_listener_register(&core_memory_listener, &address_space_memory);
diff --git a/include/exec/ioport.h b/include/exec/ioport.h
index bdd4e96..84f7f85 100644
--- a/include/exec/ioport.h
+++ b/include/exec/ioport.h
@@ -45,6 +45,8 @@ typedef struct MemoryRegionPortio {
 
 #define PORTIO_END_OF_LIST() { }
 
+extern const MemoryRegionOps unassigned_io_ops;
+
 void cpu_outb(pio_addr_t addr, uint8_t val);
 void cpu_outw(pio_addr_t addr, uint16_t val);
 void cpu_outl(pio_addr_t addr, uint32_t val);
diff --git a/ioport.c b/ioport.c
index 79b7f1a..9765588 100644
--- a/ioport.c
+++ b/ioport.c
@@ -44,6 +44,22 @@ typedef struct MemoryRegionPortioList {
     MemoryRegionPortio ports[];
 } MemoryRegionPortioList;
 
+static uint64_t unassigned_io_read(void *opaque, hwaddr addr, unsigned size)
+{
+    return -1UL;
+}
+
+static void unassigned_io_write(void *opaque, hwaddr addr, uint64_t val,
+                                unsigned size)
+{
+}
+
+const MemoryRegionOps unassigned_io_ops = {
+    .read = unassigned_io_read,
+    .write = unassigned_io_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
 void cpu_outb(pio_addr_t addr, uint8_t val)
 {
     LOG_IOPORT("outb: %04"FMT_pioaddr" %02"PRIx8"\n", addr, val);