
[ovs-dev,v4] OVN localport type support

Message ID 20170526044632.GB2820@ovn.org
State Superseded

Commit Message

Ben Pfaff May 26, 2017, 4:46 a.m. UTC
On Tue, May 23, 2017 at 03:13:08PM +0200, Daniel Alvarez Sanchez wrote:
> On Tue, May 23, 2017 at 10:01 AM, Miguel Angel Ajo Pelayo <
> majopela@redhat.com> wrote:
> 
> > If we foresee use cases with several local ports per logical switch/chassis
> > could one option be to allocate a bit in REG10 to mark local ports,
> > and then have a single rule that matches reg10 to drop output/forwarding
> > of packets?
> >
> I like the idea... let's see what others say about this, I don't know how
> strict we want to be about consuming bits from registers.
> Thanks Miguel for the suggestion :)

I don't think we need a bit from a register here.  Here's my suggested
incremental followed by a full patch.  It passes the test, at least.
Will you take a look?

Comments

Daniel Alvarez Sanchez May 26, 2017, 12:10 p.m. UTC | #1
Hi Ben,

Thanks for proposing this optimization.
I've tried it and it works. These flows are inserted in table 32 when I
have a logical switch with two logical ports and one localport:

 cookie=0x0, duration=528.253s, table=32, n_packets=416, n_bytes=17584,
idle_age=1, priority=150,reg14=0x3,metadata=0x9 actions=resubmit(,33)

Also, if I try to ping a port on a different chassis from a localport, the
ARP request gets dropped in table 34:

 cookie=0x0, duration=568.319s, table=34, n_packets=448, n_bytes=18816,
idle_age=1, priority=100,reg10=0/0x1,reg14=0x3,reg15=0x3,metadata=0x9
actions=drop

If I insert a static entry in the ARP table for that remote port, then the
packet gets forwarded to table 33 but dropped there because there are no
matching flows.
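
For reference, equivalent dumps can be obtained on each chassis with
something along these lines (assuming the integration bridge is named
br-int, the default):

 ovs-ofctl dump-flows br-int table=32
 ovs-ofctl dump-flows br-int table=34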

I just submitted version 5 of the patch, adding links to the OpenStack
documentation (which has already been merged) and fixing a minor indentation
issue.

Thanks once again!
Daniel


Patch

diff --git a/ovn/controller/physical.c b/ovn/controller/physical.c
index c98b3053130c..2172dd849893 100644
--- a/ovn/controller/physical.c
+++ b/ovn/controller/physical.c
@@ -293,8 +293,7 @@  consider_port_binding(enum mf_field_id mff_ovn_geneve,
                       const struct sbrec_port_binding *binding,
                       const struct sbrec_chassis *chassis,
                       struct ofpbuf *ofpacts_p,
-                      struct hmap *flow_table,
-                      const struct sset *local_lports)
+                      struct hmap *flow_table)
 {
     uint32_t dp_key = binding->datapath->tunnel_key;
     uint32_t port_key = binding->tunnel_key;
@@ -602,32 +601,6 @@  consider_port_binding(enum mf_field_id mff_ovn_geneve,
     } else {
         /* Remote port connected by tunnel */
 
-        /* Table 32, priority 150.
-         * =======================
-         *
-         * Drop traffic originated from a localport to a remote destination.
-         */
-        const char *localport;
-        SSET_FOR_EACH (localport, local_lports) {
-            /* Iterate over all local logical ports and insert a drop
-             * rule with higher priority for every localport in this
-             * datapath. */
-            const struct sbrec_port_binding *pb = lport_lookup_by_name(
-                lports, localport);
-            if (pb && pb->datapath->tunnel_key == dp_key &&
-                !strcmp(pb->type, "localport")) {
-                match_init_catchall(&match);
-                ofpbuf_clear(ofpacts_p);
-                /* Match localport logical in_port. */
-                match_set_reg(&match, MFF_LOG_INPORT - MFF_REG0,
-                              pb->tunnel_key);
-                /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
-                match_set_metadata(&match, htonll(dp_key));
-                match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
-                ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 150, 0,
-                                &match, ofpacts_p);
-            }
-        }
         /* Table 32, priority 100.
          * =======================
          *
@@ -919,7 +892,7 @@  physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
     SBREC_PORT_BINDING_FOR_EACH (binding, ctx->ovnsb_idl) {
         consider_port_binding(mff_ovn_geneve, ct_zones, lports,
                               local_datapaths, binding, chassis,
-                              &ofpacts, flow_table, local_lports);
+                              &ofpacts, flow_table);
     }
 
     /* Handle output to multicast groups, in tables 32 and 33. */
@@ -1016,15 +989,40 @@  physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
      */
     struct match match;
     match_init_catchall(&match);
-    ofpbuf_clear(&ofpacts);
     match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
                          MLF_RCV_FROM_VXLAN, MLF_RCV_FROM_VXLAN);
 
     /* Resubmit to table 33. */
+    ofpbuf_clear(&ofpacts);
     put_resubmit(OFTABLE_LOCAL_OUTPUT, &ofpacts);
     ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 150, 0,
                     &match, &ofpacts);
 
+    /* Table 32, priority 150.
+     * =======================
+     *
+     * Handles packets received from ports of type "localport".  These ports
+     * are present on every hypervisor.  Traffic that originates at one should
+     * never go over a tunnel to a remote hypervisor, so resubmit them to table
+     * 33 for local delivery. */
+    match_init_catchall(&match);
+    ofpbuf_clear(&ofpacts);
+    put_resubmit(OFTABLE_LOCAL_OUTPUT, &ofpacts);
+    const char *localport;
+    SSET_FOR_EACH (localport, local_lports) {
+        /* Iterate over all local logical ports and, for each localport,
+         * insert a priority-150 flow that resubmits its traffic to
+         * table 33 for local delivery. */
+        const struct sbrec_port_binding *pb = lport_lookup_by_name(
+            lports, localport);
+        if (pb && !strcmp(pb->type, "localport")) {
+            match_set_reg(&match, MFF_LOG_INPORT - MFF_REG0, pb->tunnel_key);
+            match_set_metadata(&match, htonll(pb->datapath->tunnel_key));
+            ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 150, 0,
+                            &match, &ofpacts);
+        }
+    }
+
     /* Table 32, Priority 0.
      * =======================
      *
diff --git a/ovn/ovn-architecture.7.xml b/ovn/ovn-architecture.7.xml
index 2d2d0cc88435..3983a93ee114 100644
--- a/ovn/ovn-architecture.7.xml
+++ b/ovn/ovn-architecture.7.xml
@@ -1000,23 +1000,38 @@ 
         hypervisor.  Each flow's actions implement sending a packet to the port
         it matches.  For unicast logical output ports on remote hypervisors,
         the actions set the tunnel key to the correct value, then send the
-        packet on the tunnel port to the correct hypervisor (unless the packet
-        comes from a localport, in which case it will be dropped). (When the
-        remote hypervisor receives the packet, table 0 there will recognize it
-        as a tunneled packet and pass it along to table 33.)  For multicast
-        logical output ports, the actions send one copy of the packet to each
-        remote hypervisor, in the same way as for unicast destinations.  If a
+        packet on the tunnel port to the correct hypervisor.  (When the remote
+        hypervisor receives the packet, table 0 there will recognize it as a
+        tunneled packet and pass it along to table 33.)  For multicast logical
+        output ports, the actions send one copy of the packet to each remote
+        hypervisor, in the same way as for unicast destinations.  If a
         multicast group includes a logical port or ports on the local
         hypervisor, then its actions also resubmit to table 33.  Table 32 also
-        includes a fallback flow that resubmits to table 33 if there is no
-        other match.  Table 32 also contains a higher priority rule to match
-        packets received from VXLAN tunnels, based on flag MLF_RCV_FROM_VXLAN
-        and resubmit these packets to table 33 for local delivery. Packets
-        received from VXLAN tunnels reach here because of a lack of logical
-        output port field in the tunnel key and thus these packets needed to
-        be submitted to table 16 to determine the output port.
+        includes:
       </p>
 
+      <ul>
+        <li>
+          A higher-priority rule to match packets received from VXLAN tunnels,
+          based on flag MLF_RCV_FROM_VXLAN, and resubmit these packets to table
+          33 for local delivery.  Packets received from VXLAN tunnels reach
+          here because of a lack of logical output port field in the tunnel key
+          and thus these packets needed to be submitted to table 16 to
+          determine the output port.
+        </li>
+        <li>
+          A higher-priority rule to match packets received from ports of type
+          <code>localport</code>, based on the logical input port, and resubmit
+          these packets to table 33 for local delivery.  Ports of type
+          <code>localport</code> exist on every hypervisor and by definition
+          their traffic should never go out through a tunnel.
+        </li>
+        <li>
+          A fallback flow that resubmits to table 33 if there is no other
+          match.
+        </li>
+      </ul>
+
       <p>
         Flows in table 33 resemble those in table 32 but for logical ports that
         reside locally rather than remotely.  For unicast logical output ports

--8<--------------------------cut here-------------------------->8--

From: Daniel Alvarez <dalvarez@redhat.com>
Date: Wed, 10 May 2017 08:35:45 +0000
Subject: [PATCH] OVN localport type support

This patch introduces a new type of OVN port called "localport".
These ports will be present in every hypervisor and may have the
same IP/MAC addresses. They are not bound to any chassis and traffic
to these ports will never go through a tunnel.

Its main use case is OpenStack metadata API support, which relies
on a local agent running on every hypervisor and serving metadata to
VMs locally. This service is described in detail at [0].

An example to illustrate the purpose of this patch:

- One logical switch sw0 with 2 ports (p1, p2) and 1 localport (lp)
- Two hypervisors: HV1 and HV2
- p1 in HV1 (OVS port with external-ids:iface-id="p1")
- p2 in HV2 (OVS port with external-ids:iface-id="p2")
- lp in both hypervisors (OVS port with external-ids:iface-id="lp")
- p1 should be able to reach p2 and vice versa
- lp on HV1 should be able to reach p1 but not p2
- lp on HV2 should be able to reach p2 but not p1

Flows are inserted in table 32 with priority 150 in order to prevent
traffic originating at a localport from going over a tunnel; it is
resubmitted to table 33 for local delivery instead.
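
As a rough sketch (the names below are illustrative and not part of the
patch itself), such a topology can be built with:

  ovn-nbctl ls-add sw0
  ovn-nbctl lsp-add sw0 p1 -- lsp-set-addresses p1 "f0:00:00:00:00:01"
  ovn-nbctl lsp-add sw0 p2 -- lsp-set-addresses p2 "f0:00:00:00:00:02"
  ovn-nbctl lsp-add sw0 lp -- lsp-set-addresses lp "f0:00:00:00:00:03"
  ovn-nbctl lsp-set-type lp localport

with lp then bound on every hypervisor by an OVS interface whose
external-ids:iface-id is set to "lp".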

[0] https://review.openstack.org/#/c/452811/

Signed-off-by: Daniel Alvarez <dalvarez@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
---
 ovn/controller/binding.c        |   3 +-
 ovn/controller/ovn-controller.c |   2 +-
 ovn/controller/physical.c       |  30 +++++++++-
 ovn/controller/physical.h       |   4 +-
 ovn/northd/ovn-northd.8.xml     |   8 +--
 ovn/northd/ovn-northd.c         |   6 +-
 ovn/ovn-architecture.7.xml      |  44 ++++++++++++---
 ovn/ovn-nb.xml                  |   9 +++
 ovn/ovn-sb.xml                  |  14 +++++
 tests/ovn.at                    | 122 ++++++++++++++++++++++++++++++++++++++++
 10 files changed, 224 insertions(+), 18 deletions(-)

diff --git a/ovn/controller/binding.c b/ovn/controller/binding.c
index 95e9deb322b4..83a75434ec79 100644
--- a/ovn/controller/binding.c
+++ b/ovn/controller/binding.c
@@ -380,7 +380,8 @@  consider_local_datapath(struct controller_ctx *ctx,
         if (iface_rec && qos_map && ctx->ovs_idl_txn) {
             get_qos_params(binding_rec, qos_map);
         }
-        our_chassis = true;
+	if(strcmp(binding_rec->type, "localport"))
+            our_chassis = true;
     } else if (!strcmp(binding_rec->type, "l2gateway")) {
         const char *chassis_id = smap_get(&binding_rec->options,
                                           "l2gateway-chassis");
diff --git a/ovn/controller/ovn-controller.c b/ovn/controller/ovn-controller.c
index f22551d6dcf3..0f4dd35723e6 100644
--- a/ovn/controller/ovn-controller.c
+++ b/ovn/controller/ovn-controller.c
@@ -655,7 +655,7 @@  main(int argc, char *argv[])
 
                     physical_run(&ctx, mff_ovn_geneve,
                                  br_int, chassis, &ct_zones, &lports,
-                                 &flow_table, &local_datapaths);
+                                 &flow_table, &local_datapaths, &local_lports);
 
                     ofctrl_put(&flow_table, &pending_ct_zones,
                                get_nb_cfg(ctx.ovnsb_idl));
diff --git a/ovn/controller/physical.c b/ovn/controller/physical.c
index 457fc45414bd..2172dd849893 100644
--- a/ovn/controller/physical.c
+++ b/ovn/controller/physical.c
@@ -769,7 +769,8 @@  physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
              const struct ovsrec_bridge *br_int,
              const struct sbrec_chassis *chassis,
              const struct simap *ct_zones, struct lport_index *lports,
-             struct hmap *flow_table, struct hmap *local_datapaths)
+             struct hmap *flow_table, struct hmap *local_datapaths,
+             const struct sset *local_lports)
 {
 
     /* This bool tracks physical mapping changes. */
@@ -988,15 +989,40 @@  physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
      */
     struct match match;
     match_init_catchall(&match);
-    ofpbuf_clear(&ofpacts);
     match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
                          MLF_RCV_FROM_VXLAN, MLF_RCV_FROM_VXLAN);
 
     /* Resubmit to table 33. */
+    ofpbuf_clear(&ofpacts);
     put_resubmit(OFTABLE_LOCAL_OUTPUT, &ofpacts);
     ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 150, 0,
                     &match, &ofpacts);
 
+    /* Table 32, priority 150.
+     * =======================
+     *
+     * Handles packets received from ports of type "localport".  These ports
+     * are present on every hypervisor.  Traffic that originates at one should
+     * never go over a tunnel to a remote hypervisor, so resubmit them to table
+     * 33 for local delivery. */
+    match_init_catchall(&match);
+    ofpbuf_clear(&ofpacts);
+    put_resubmit(OFTABLE_LOCAL_OUTPUT, &ofpacts);
+    const char *localport;
+    SSET_FOR_EACH (localport, local_lports) {
+        /* Iterate over all local logical ports and, for each localport,
+         * insert a priority-150 flow that resubmits its traffic to
+         * table 33 for local delivery. */
+        const struct sbrec_port_binding *pb = lport_lookup_by_name(
+            lports, localport);
+        if (pb && !strcmp(pb->type, "localport")) {
+            match_set_reg(&match, MFF_LOG_INPORT - MFF_REG0, pb->tunnel_key);
+            match_set_metadata(&match, htonll(pb->datapath->tunnel_key));
+            ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 150, 0,
+                            &match, &ofpacts);
+        }
+    }
+
     /* Table 32, Priority 0.
      * =======================
      *
diff --git a/ovn/controller/physical.h b/ovn/controller/physical.h
index e2debed89362..66aa80e2d998 100644
--- a/ovn/controller/physical.h
+++ b/ovn/controller/physical.h
@@ -32,6 +32,7 @@  struct hmap;
 struct ovsdb_idl;
 struct ovsrec_bridge;
 struct simap;
+struct sset;
 
 /* OVN Geneve option information.
  *
@@ -45,6 +46,7 @@  void physical_run(struct controller_ctx *, enum mf_field_id mff_ovn_geneve,
                   const struct ovsrec_bridge *br_int,
                   const struct sbrec_chassis *chassis,
                   const struct simap *ct_zones, struct lport_index *,
-                  struct hmap *flow_table, struct hmap *local_datapaths);
+                  struct hmap *flow_table, struct hmap *local_datapaths,
+                  const struct sset *local_lports);
 
 #endif /* ovn/physical.h */
diff --git a/ovn/northd/ovn-northd.8.xml b/ovn/northd/ovn-northd.8.xml
index c0b4c5eb0ead..7ff524508c73 100644
--- a/ovn/northd/ovn-northd.8.xml
+++ b/ovn/northd/ovn-northd.8.xml
@@ -492,8 +492,8 @@  output;
         </pre>
 
         <p>
-          These flows are omitted for logical ports (other than router ports)
-          that are down.
+          These flows are omitted for logical ports (other than router ports or
+          <code>localport</code> ports) that are down.
         </p>
       </li>
 
@@ -519,8 +519,8 @@  nd_na {
         </pre>
 
         <p>
-          These flows are omitted for logical ports (other than router ports)
-          that are down.
+          These flows are omitted for logical ports (other than router ports or
+          <code>localport</code> ports) that are down.
         </p>
       </li>
 
diff --git a/ovn/northd/ovn-northd.c b/ovn/northd/ovn-northd.c
index 83db75338557..a3bd859afe58 100644
--- a/ovn/northd/ovn-northd.c
+++ b/ovn/northd/ovn-northd.c
@@ -3305,9 +3305,11 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         /*
          * Add ARP/ND reply flows if either the
          *  - port is up or
-         *  - port type is router
+         *  - port type is router or
+         *  - port type is localport
          */
-        if (!lsp_is_up(op->nbsp) && strcmp(op->nbsp->type, "router")) {
+        if (!lsp_is_up(op->nbsp) && strcmp(op->nbsp->type, "router") &&
+            strcmp(op->nbsp->type, "localport")) {
             continue;
         }
 
diff --git a/ovn/ovn-architecture.7.xml b/ovn/ovn-architecture.7.xml
index d8114f1f9dee..3983a93ee114 100644
--- a/ovn/ovn-architecture.7.xml
+++ b/ovn/ovn-architecture.7.xml
@@ -409,6 +409,20 @@ 
           logical patch ports at each such point of connectivity, one on
           each side.
         </li>
+        <li>
+          <dfn>Localport ports</dfn> represent the points of local
+          connectivity between logical switches and VIFs. These ports are
+          present in every chassis (not bound to any particular one) and
+          traffic from them will never go through a tunnel. A
+          <code>localport</code> is expected to only generate traffic destined
+          for a local destination, typically in response to a request it
+          received.
+          One use case is how OpenStack Neutron uses a <code>localport</code>
+          port for serving metadata to VM's residing on every hypervisor. A
+          metadata proxy process is attached to this port on every host and all
+          VM's within the same network will reach it at the same IP/MAC address
+          without any traffic being sent over a tunnel.
+        </li>
       </ul>
     </li>
   </ul>
@@ -993,15 +1007,31 @@ 
         hypervisor, in the same way as for unicast destinations.  If a
         multicast group includes a logical port or ports on the local
         hypervisor, then its actions also resubmit to table 33.  Table 32 also
-        includes a fallback flow that resubmits to table 33 if there is no
-        other match.  Table 32 also contains a higher priority rule to match
-        packets received from VXLAN tunnels, based on flag MLF_RCV_FROM_VXLAN
-        and resubmit these packets to table 33 for local delivery. Packets
-        received from VXLAN tunnels reach here because of a lack of logical
-        output port field in the tunnel key and thus these packets needed to
-        be submitted to table 16 to determine the output port.
+        includes:
       </p>
 
+      <ul>
+        <li>
+          A higher-priority rule to match packets received from VXLAN tunnels,
+          based on flag MLF_RCV_FROM_VXLAN, and resubmit these packets to table
+          33 for local delivery.  Packets received from VXLAN tunnels reach
+          here because of a lack of logical output port field in the tunnel key
+          and thus these packets needed to be submitted to table 16 to
+          determine the output port.
+        </li>
+        <li>
+          A higher-priority rule to match packets received from ports of type
+          <code>localport</code>, based on the logical input port, and resubmit
+          these packets to table 33 for local delivery.  Ports of type
+          <code>localport</code> exist on every hypervisor and by definition
+          their traffic should never go out through a tunnel.
+        </li>
+        <li>
+          A fallback flow that resubmits to table 33 if there is no other
+          match.
+        </li>
+      </ul>
+
       <p>
         Flows in table 33 resemble those in table 32 but for logical ports that
         reside locally rather than remotely.  For unicast logical output ports
diff --git a/ovn/ovn-nb.xml b/ovn/ovn-nb.xml
index f5be9e24b7bc..eb348fe59c4b 100644
--- a/ovn/ovn-nb.xml
+++ b/ovn/ovn-nb.xml
@@ -283,6 +283,15 @@ 
             to model direct connectivity to an existing network.
           </dd>
 
+          <dt><code>localport</code></dt>
+          <dd>
+            A connection to a local VIF. Traffic that arrives on a
+            <code>localport</code> is never forwarded over a tunnel to another
+            chassis. These ports are present on every chassis and have the same
+            address in all of them. This is used to model connectivity to local
+            services that run on every hypervisor.
+          </dd>
+
           <dt><code>l2gateway</code></dt>
           <dd>
             A connection to a physical network.
diff --git a/ovn/ovn-sb.xml b/ovn/ovn-sb.xml
index 387adb8069d3..f3c321222c0d 100644
--- a/ovn/ovn-sb.xml
+++ b/ovn/ovn-sb.xml
@@ -1802,6 +1802,11 @@  tcp.flags = RST;
             connectivity to the corresponding physical network.
           </dd>
 
+          <dt>localport</dt>
+          <dd>
+            Always empty.  A localport port is present on every chassis.
+          </dd>
+
           <dt>l3gateway</dt>
           <dd>
             The physical location of the L3 gateway.  To successfully identify a
@@ -1882,6 +1887,15 @@  tcp.flags = RST;
             to model direct connectivity to an existing network.
           </dd>
 
+          <dt><code>localport</code></dt>
+          <dd>
+            A connection to a local VIF. Traffic that arrives on a
+            <code>localport</code> is never forwarded over a tunnel to another
+            chassis. These ports are present on every chassis and have the same
+            address in all of them. This is used to model connectivity to local
+            services that run on every hypervisor.
+          </dd>
+
           <dt><code>l2gateway</code></dt>
           <dd>
             An L2 connection to a physical network.  The chassis this
diff --git a/tests/ovn.at b/tests/ovn.at
index b30315e57034..8abf45ae7701 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -7374,3 +7374,125 @@  OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
 OVN_CLEANUP([hv1],[hv2])
 
 AT_CLEANUP
+
+AT_SETUP([ovn -- 2 HVs, 1 lport/HV, localport ports])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+ovn_start
+
+ovn-nbctl ls-add ls1
+
+# Add localport to the switch
+ovn-nbctl lsp-add ls1 lp01
+ovn-nbctl lsp-set-addresses lp01 f0:00:00:00:00:01
+ovn-nbctl lsp-set-type lp01 localport
+
+net_add n1
+
+for i in 1 2; do
+    sim_add hv$i
+    as hv$i
+    ovs-vsctl add-br br-phys
+    ovn_attach n1 br-phys 192.168.0.$i
+    ovs-vsctl add-port br-int vif01 -- \
+        set Interface vif01 external-ids:iface-id=lp01 \
+                              options:tx_pcap=hv${i}/vif01-tx.pcap \
+                              options:rxq_pcap=hv${i}/vif01-rx.pcap \
+                              ofport-request=${i}0
+
+    ovs-vsctl add-port br-int vif${i}1 -- \
+        set Interface vif${i}1 external-ids:iface-id=lp${i}1 \
+                              options:tx_pcap=hv${i}/vif${i}1-tx.pcap \
+                              options:rxq_pcap=hv${i}/vif${i}1-rx.pcap \
+                              ofport-request=${i}1
+
+    ovn-nbctl lsp-add ls1 lp${i}1
+    ovn-nbctl lsp-set-addresses lp${i}1 f0:00:00:00:00:${i}1
+    ovn-nbctl lsp-set-port-security lp${i}1 f0:00:00:00:00:${i}1
+
+        OVS_WAIT_UNTIL([test x`ovn-nbctl lsp-get-up lp${i}1` = xup])
+done
+
+ovn-nbctl --wait=sb sync
+ovn-sbctl dump-flows
+
+ovn_populate_arp
+
+# Given the name of a logical port, prints the name of the hypervisor
+# on which it is located.
+vif_to_hv() {
+    echo hv${1%?}
+}
+#
+# test_packet INPORT DST SRC ETHTYPE EOUT LOUT DEFHV
+#
+# This shell function causes a packet to be received on INPORT.  The packet's
+# content has Ethernet destination DST and source SRC (each exactly 12 hex
+# digits) and Ethernet type ETHTYPE (4 hex digits).  INPORT is specified as
+# logical switch port numbers, e.g. 11 for vif11.
+#
+# EOUT is the end-to-end output port, that is, where the packet will end up
+# after possibly bouncing through one or more localnet ports.  LOUT is the
+# logical output port, which might be a localnet port, as seen by ovn-trace
+# (which doesn't know what localnet ports are connected to and therefore can't
+# figure out the end-to-end answer).
+#
+# DEFHV is the default hypervisor from where the packet is going to be sent
+# if the source port is a localport.
+for i in 1 2; do
+    for j in 0 1; do
+        : > $i$j.expected
+    done
+done
+test_packet() {
+    local inport=$1 dst=$2 src=$3 eth=$4 eout=$5 lout=$6 defhv=$7
+    echo "$@"
+
+    # First try tracing the packet.
+    uflow="inport==\"lp$inport\" && eth.dst==$dst && eth.src==$src && eth.type==0x$eth"
+    if test $lout != drop; then
+        echo "output(\"$lout\");"
+    fi > expout
+    AT_CAPTURE_FILE([trace])
+    AT_CHECK([ovn-trace --all ls1 "$uflow" | tee trace | sed '1,/Minimal trace/d'], [0], [expout])
+
+    # Then actually send a packet, for an end-to-end test.
+    local packet=$(echo $dst$src | sed 's/://g')${eth}
+    hv=`vif_to_hv $inport`
+    # If hypervisor 0 (localport) use the defhv parameter
+    if test $hv = hv0; then
+        hv=$defhv
+    fi
+    vif=vif$inport
+    as $hv ovs-appctl netdev-dummy/receive $vif $packet
+    if test $eout != drop; then
+        echo $packet >> ${eout#lp}.expected
+    fi
+}
+
+
+# lp11 and lp21 are on different hypervisors
+test_packet 11 f0:00:00:00:00:21 f0:00:00:00:00:11 1121 lp21 lp21
+test_packet 21 f0:00:00:00:00:11 f0:00:00:00:00:21 2111 lp11 lp11
+
+# Both VIFs should be able to reach the localport on their own HV
+test_packet 11 f0:00:00:00:00:01 f0:00:00:00:00:11 1101 lp01 lp01
+test_packet 21 f0:00:00:00:00:01 f0:00:00:00:00:21 2101 lp01 lp01
+
+# Packet sent from localport on same hv should reach the vif
+test_packet 01 f0:00:00:00:00:11 f0:00:00:00:00:01 0111 lp11 lp11 hv1
+test_packet 01 f0:00:00:00:00:21 f0:00:00:00:00:01 0121 lp21 lp21 hv2
+
+# Packet sent from localport on different hv should be dropped
+test_packet 01 f0:00:00:00:00:21 f0:00:00:00:00:01 0121 drop lp21 hv1
+test_packet 01 f0:00:00:00:00:11 f0:00:00:00:00:01 0111 drop lp11 hv2
+
+# Now check the packets actually received against the ones expected.
+for i in 1 2; do
+    for j in 0 1; do
+        OVN_CHECK_PACKETS([hv$i/vif$i$j-tx.pcap], [$i$j.expected])
+    done
+done
+
+OVN_CLEANUP([hv1],[hv2])
+
+AT_CLEANUP