diff mbox series

[ovs-dev,v2] ovn: OVN Support QoS meter

Message ID 20170920115234.3800-1-ligs@dtdream.com
State Not Applicable
Headers show
Series [ovs-dev,v2] ovn: OVN Support QoS meter | expand

Commit Message

Guoshuai Li Sept. 20, 2017, 11:52 a.m. UTC
ovn-northd modify:
add bandwidth column in NB's QOS table.
add QOS_METER stages in Logical switch ingress/egress.
add set_meter() action in SB's LFlow table.

ovn-controller modify:
add meter_table so that the meter action can be translated into OpenFlow meter table entries.

This feature is only supported in DPDK.
---

v2: Fix Ingress/Egress Table id error.

---
 NEWS                            |   1 +
 include/ovn/actions.h           |  31 +++++++-
 ovn/controller/lflow.c          |   9 ++-
 ovn/controller/lflow.h          |   2 +
 ovn/controller/ofctrl.c         | 158 +++++++++++++++++++++++++++++++++++++++-
 ovn/controller/ofctrl.h         |   6 +-
 ovn/controller/ovn-controller.c |  25 ++++++-
 ovn/lib/actions.c               | 114 +++++++++++++++++++++++++++++
 ovn/northd/ovn-northd.8.xml     |  54 ++++++++++----
 ovn/northd/ovn-northd.c         | 108 +++++++++++++++++----------
 ovn/ovn-nb.ovsschema            |  11 ++-
 ovn/ovn-nb.xml                  |  16 ++++
 ovn/ovn-sb.xml                  |  15 ++++
 ovn/utilities/ovn-trace.c       |   4 +
 tests/ovn.at                    |   7 +-
 tests/test-ovn.c                |   8 ++
 16 files changed, 504 insertions(+), 65 deletions(-)

Comments

Miguel Angel Ajo Sept. 21, 2017, 11:03 a.m. UTC | #1
I thought we didn't have meters yet in OvS switch implementation (beyond
openflow protocol support) as per:

http://docs.openvswitch.org/en/latest/faq/qos/

Has this changed in master?

On Wed, Sep 20, 2017 at 1:52 PM, Guoshuai Li <ligs@dtdream.com> wrote:

> ovn-northd modify:
> add bandwidth column in NB's QOS table.
> add QOS_METER stages in Logical switch ingress/egress.
> add set_meter() action in SB's LFlow table.
>
> ovn-controller modify:
> add meter_table for meter action process openflow meter table.
>
> This feature is only supported in DPDK.
> ---
>
> v2: Fix Ingress/Egress Table id error.
>
> ---
>  NEWS                            |   1 +
>  include/ovn/actions.h           |  31 +++++++-
>  ovn/controller/lflow.c          |   9 ++-
>  ovn/controller/lflow.h          |   2 +
>  ovn/controller/ofctrl.c         | 158 ++++++++++++++++++++++++++++++
> +++++++++-
>  ovn/controller/ofctrl.h         |   6 +-
>  ovn/controller/ovn-controller.c |  25 ++++++-
>  ovn/lib/actions.c               | 114 +++++++++++++++++++++++++++++
>  ovn/northd/ovn-northd.8.xml     |  54 ++++++++++----
>  ovn/northd/ovn-northd.c         | 108 +++++++++++++++++----------
>  ovn/ovn-nb.ovsschema            |  11 ++-
>  ovn/ovn-nb.xml                  |  16 ++++
>  ovn/ovn-sb.xml                  |  15 ++++
>  ovn/utilities/ovn-trace.c       |   4 +
>  tests/ovn.at                    |   7 +-
>  tests/test-ovn.c                |   8 ++
>  16 files changed, 504 insertions(+), 65 deletions(-)
>
> diff --git a/NEWS b/NEWS
> index 6a5d2bf98..b97e7bff7 100644
> --- a/NEWS
> +++ b/NEWS
> @@ -59,6 +59,7 @@ v2.8.0 - xx xxx xxxx
>         gateway.
>       * Add support for ACL logging.
>       * ovn-northd now has native support for active-standby high
> availability.
> +     * Add support for QoS bandwidth limit with DPDK.
>     - Tracing with ofproto/trace now traces through recirculation.
>     - OVSDB:
>       * New support for role-based access control (see ovsdb-server(1)).
> diff --git a/include/ovn/actions.h b/include/ovn/actions.h
> index 0a04af7aa..8dbb895f3 100644
> --- a/include/ovn/actions.h
> +++ b/include/ovn/actions.h
> @@ -72,7 +72,8 @@ struct simap;
>      OVNACT(PUT_DHCPV6_OPTS,   ovnact_put_dhcp_opts)   \
>      OVNACT(SET_QUEUE,         ovnact_set_queue)       \
>      OVNACT(DNS_LOOKUP,        ovnact_dns_lookup)      \
> -    OVNACT(LOG,               ovnact_log)
> +    OVNACT(LOG,               ovnact_log)             \
> +    OVNACT(SET_METER,         ovnact_set_meter)
>
>  /* enum ovnact_type, with a member OVNACT_<ENUM> for each action. */
>  enum OVS_PACKED_ENUM ovnact_type {
> @@ -274,6 +275,13 @@ struct ovnact_log {
>      char *name;
>  };
>
> +/* OVNACT_SET_METER. */
> +struct ovnact_set_meter {
> +    struct ovnact ovnact;
> +    uint32_t rate;              /* 32-bit rate field. */
> +    uint32_t burst;             /* 32-bit burst rate field. */
> +};
> +
>  /* Internal use by the helpers below. */
>  void ovnact_init(struct ovnact *, enum ovnact_type, size_t len);
>  void *ovnact_put(struct ofpbuf *, enum ovnact_type, size_t len);
> @@ -350,6 +358,24 @@ struct group_info {
>                           * group_table's 'group_ids' bitmap. */
>  };
>
> +#define MAX_OVN_METERS 65535
> +
> +struct meter_table {
> +    unsigned long *meter_ids;  /* Used as a bitmap with value set
> +                                * for allocated meter ids in either
> +                                * desired_meters or existing_meters. */
> +    struct hmap desired_meters;
> +    struct hmap existing_meters;
> +};
> +
> +struct meter_info {
> +    struct hmap_node hmap_node;
> +    struct ds meter;
> +    uint32_t meter_id;
> +    bool new_meter_id;  /* 'True' if 'meter_id' was reserved from
> +                         * meter_table's 'meter_ids' bitmap. */
> +};
> +
>  enum action_opcode {
>      /* "arp { ...actions... }".
>       *
> @@ -484,6 +510,9 @@ struct ovnact_encode_params {
>      /* A struct to figure out the group_id for group actions. */
>      struct group_table *group_table;
>
> +    /* A struct to figure out the meter_id for meter actions. */
> +    struct meter_table *meter_table;
> +
>      /* OVN maps each logical flow table (ltable), one-to-one, onto a
> physical
>       * OpenFlow flow table (ptable).  A number of parameters describe this
>       * mapping and data related to flow tables:
> diff --git a/ovn/controller/lflow.c b/ovn/controller/lflow.c
> index 20a18c259..bf7a59f32 100644
> --- a/ovn/controller/lflow.c
> +++ b/ovn/controller/lflow.c
> @@ -62,6 +62,7 @@ static void consider_logical_flow(struct controller_ctx
> *ctx,
>                                    const struct sbrec_logical_flow *lflow,
>                                    const struct hmap *local_datapaths,
>                                    struct group_table *group_table,
> +                                  struct meter_table *meter_table,
>                                    const struct sbrec_chassis *chassis,
>                                    struct hmap *dhcp_opts,
>                                    struct hmap *dhcpv6_opts,
> @@ -143,6 +144,7 @@ add_logical_flows(struct controller_ctx *ctx,
>                    const struct chassis_index *chassis_index,
>                    const struct hmap *local_datapaths,
>                    struct group_table *group_table,
> +                  struct meter_table *meter_table,
>                    const struct sbrec_chassis *chassis,
>                    const struct shash *addr_sets,
>                    struct hmap *flow_table,
> @@ -170,7 +172,7 @@ add_logical_flows(struct controller_ctx *ctx,
>      SBREC_LOGICAL_FLOW_FOR_EACH (lflow, ctx->ovnsb_idl) {
>          consider_logical_flow(ctx, chassis_index,
>                                lflow, local_datapaths,
> -                              group_table, chassis,
> +                              group_table, meter_table, chassis,
>                                &dhcp_opts, &dhcpv6_opts, &conj_id_ofs,
>                                addr_sets, flow_table, active_tunnels,
>                                local_lport_ids);
> @@ -186,6 +188,7 @@ consider_logical_flow(struct controller_ctx *ctx,
>                        const struct sbrec_logical_flow *lflow,
>                        const struct hmap *local_datapaths,
>                        struct group_table *group_table,
> +                      struct meter_table *meter_table,
>                        const struct sbrec_chassis *chassis,
>                        struct hmap *dhcp_opts,
>                        struct hmap *dhcpv6_opts,
> @@ -256,6 +259,7 @@ consider_logical_flow(struct controller_ctx *ctx,
>          .is_switch = is_switch(ldp),
>          .is_gateway_router = is_gateway_router(ldp, local_datapaths),
>          .group_table = group_table,
> +        .meter_table = meter_table,
>
>          .pipeline = ingress ? OVNACT_P_INGRESS : OVNACT_P_EGRESS,
>          .ingress_ptable = OFTABLE_LOG_INGRESS_PIPELINE,
> @@ -428,13 +432,14 @@ lflow_run(struct controller_ctx *ctx,
>            const struct chassis_index *chassis_index,
>            const struct hmap *local_datapaths,
>            struct group_table *group_table,
> +          struct meter_table *meter_table,
>            const struct shash *addr_sets,
>            struct hmap *flow_table,
>            struct sset *active_tunnels,
>            struct sset *local_lport_ids)
>  {
>      add_logical_flows(ctx, chassis_index, local_datapaths,
> -                      group_table, chassis, addr_sets, flow_table,
> +                      group_table, meter_table, chassis, addr_sets,
> flow_table,
>                        active_tunnels, local_lport_ids);
>      add_neighbor_flows(ctx, flow_table);
>  }
> diff --git a/ovn/controller/lflow.h b/ovn/controller/lflow.h
> index bfb7415e2..ad8d0a4c7 100644
> --- a/ovn/controller/lflow.h
> +++ b/ovn/controller/lflow.h
> @@ -38,6 +38,7 @@
>  struct chassis_index;
>  struct controller_ctx;
>  struct group_table;
> +struct meter_table;
>  struct hmap;
>  struct sbrec_chassis;
>  struct simap;
> @@ -67,6 +68,7 @@ void lflow_run(struct controller_ctx *,
>                 const struct chassis_index *,
>                 const struct hmap *local_datapaths,
>                 struct group_table *group_table,
> +               struct meter_table *meter_table,
>                 const struct shash *addr_sets,
>                 struct hmap *flow_table,
>                 struct sset *active_tunnels,
> diff --git a/ovn/controller/ofctrl.c b/ovn/controller/ofctrl.c
> index fc88a410b..d92258558 100644
> --- a/ovn/controller/ofctrl.c
> +++ b/ovn/controller/ofctrl.c
> @@ -133,6 +133,9 @@ static struct hmap installed_flows;
>  /* A reference to the group_table. */
>  static struct group_table *groups;
>
> +/* A reference to the meter_table. */
> +static struct meter_table *meters;
> +
>  /* MFF_* field ID for our Geneve option.  In S_TLV_TABLE_MOD_SENT, this is
>   * the option we requested (we don't know whether we obtained it yet).  In
>   * S_CLEAR_FLOWS or S_UPDATE_FLOWS, this is really the option we have. */
> @@ -144,13 +147,15 @@ static struct ofpbuf *encode_flow_mod(struct
> ofputil_flow_mod *);
>
>  static struct ofpbuf *encode_group_mod(const struct ofputil_group_mod *);
>
> +static struct ofpbuf *encode_meter_mod(const struct ofputil_meter_mod *);
> +
>  static void ovn_flow_table_clear(struct hmap *flow_table);
>  static void ovn_flow_table_destroy(struct hmap *flow_table);
>
>  static void ofctrl_recv(const struct ofp_header *, enum ofptype);
>
>  void
> -ofctrl_init(struct group_table *group_table)
> +ofctrl_init(struct group_table *group_table, struct meter_table
> *meter_table)
>  {
>      swconn = rconn_create(5, 0, DSCP_DEFAULT, 1 << OFP13_VERSION);
>      tx_counter = rconn_packet_counter_create();
> @@ -158,6 +163,7 @@ ofctrl_init(struct group_table *group_table)
>      ovs_list_init(&flow_updates);
>      ovn_init_symtab(&symtab);
>      groups = group_table;
> +    meters = meter_table;
>  }
>
>  /* S_NEW, for a new connection.
> @@ -388,6 +394,18 @@ run_S_CLEAR_FLOWS(void)
>          ovn_group_table_clear(groups, true);
>      }
>
> +    /* Send a meter_mod to delete all meters. */
> +    struct ofputil_meter_mod mm;
> +    memset(&mm, 0, sizeof mm);
> +    mm.command = OFPMC13_DELETE;
> +    mm.meter.meter_id = OFPM13_ALL;
> +    queue_msg(encode_meter_mod(&mm));
> +
> +    /* Clear existing meters, to match the state of the switch. */
> +    if (meters) {
> +        ovn_meter_table_clear(meters, true);
> +    }
> +
>      /* All flow updates are irrelevant now. */
>      struct ofctrl_flow_update *fup, *next;
>      LIST_FOR_EACH_SAFE (fup, next, list_node, &flow_updates) {
> @@ -797,7 +815,60 @@ add_group_mod(const struct ofputil_group_mod *gm,
> struct ovs_list *msgs)
>      struct ofpbuf *msg = encode_group_mod(gm);
>      ovs_list_push_back(msgs, &msg->list_node);
>  }
> -
> +
> +/* meter_table. */
> +
> +/* Finds and returns a meter_info in 'existing_meters' whose key is
> identical
> + * to 'target''s key, or NULL if there is none. */
> +static struct meter_info *
> +ovn_meter_lookup(struct hmap *exisiting_meters,
> +                 const struct meter_info *target)
> +{
> +    struct meter_info *e;
> +
> +    HMAP_FOR_EACH_WITH_HASH(e, hmap_node, target->hmap_node.hash,
> +                            exisiting_meters) {
> +        if (e->meter_id == target->meter_id) {
> +            return e;
> +        }
> +   }
> +    return NULL;
> +}
> +
> +/* Clear either desired_meters or existing_meters in meter_table. */
> +void
> +ovn_meter_table_clear(struct meter_table *meter_table, bool existing)
> +{
> +    struct meter_info *m, *next;
> +    struct hmap *target_meter = existing
> +                                ? &meter_table->existing_meters
> +                                : &meter_table->desired_meters;
> +
> +    HMAP_FOR_EACH_SAFE (m, next, hmap_node, target_meter) {
> +        hmap_remove(target_meter, &m->hmap_node);
> +        /* Don't unset bitmap for desired meter_info if the meter_id
> +         * was not freshly reserved. */
> +        if (existing || m->new_meter_id) {
> +            bitmap_set0(meter_table->meter_ids, m->meter_id);
> +        }
> +        ds_destroy(&m->meter);
> +        free(m);
> +    }
> +}
> +
> +static struct ofpbuf *
> +encode_meter_mod(const struct ofputil_meter_mod *mm)
> +{
> +    return ofputil_encode_meter_mod(OFP13_VERSION, mm);
> +}
> +
> +static void
> +add_meter_mod(const struct ofputil_meter_mod *mm, struct ovs_list *msgs)
> +{
> +    struct ofpbuf *msg = encode_meter_mod(mm);
> +    ovs_list_push_back(msgs, &msg->list_node);
> +}
> +
>  static void
>  add_ct_flush_zone(uint16_t zone_id, struct ovs_list *msgs)
>  {
> @@ -833,6 +904,12 @@ ofctrl_can_put(void)
>   * 'groups->desired_groups' and frees them. (The hmap itself isn't
>   * destroyed.)
>   *
> + * Replaces the meter table on the switch, if possible, by the contents of
> + * 'meters->desired_meters'.  Regardless of whether the meter table
> + * is updated, this deletes all the meters from the
> + * 'meters->desired_meters' and frees them. (The hmap itself isn't
> + * destroyed.)
> + *
>   * Sends conntrack flush messages to each zone in 'pending_ct_zones' that
>   * is in the CT_ZONE_OF_QUEUED state and then moves the zone into the
>   * CT_ZONE_OF_SENT state.
> @@ -891,6 +968,35 @@ ofctrl_put(struct hmap *flow_table, struct shash
> *pending_ct_zones,
>          }
>      }
>
> +    /* Iterate through all the desired meters. If there are new ones,
> +     * add them to the switch. */
> +    struct meter_info *desired_meter;
> +    HMAP_FOR_EACH(desired_meter, hmap_node, &meters->desired_meters) {
> +        if (!ovn_meter_lookup(&meters->existing_meters, desired_meter)
> +            && desired_meter->meter_id) {
> +            /* Create and install new meter. */
> +            struct ofputil_meter_mod mm;
> +            enum ofputil_protocol usable_protocols;
> +            char *error;
> +            struct ds meter_string = DS_EMPTY_INITIALIZER;
> +            ds_put_format(&meter_string, "meter=%u,%s",
> +                          desired_meter->meter_id,
> +                          ds_cstr(&desired_meter->meter));
> +
> +            error = parse_ofp_meter_mod_str(&mm, ds_cstr(&meter_string),
> +                                            OFPMC13_ADD,
> &usable_protocols);
> +            if (!error) {
> +                add_meter_mod(&mm, &msgs);
> +            } else {
> +                static struct vlog_rate_limit rl =
> VLOG_RATE_LIMIT_INIT(5, 1);
> +                VLOG_ERR_RL(&rl, "new meter %s %s", error,
> +                         ds_cstr(&meter_string));
> +                free(error);
> +            }
> +            ds_destroy(&meter_string);
> +        }
> +    }
> +
>      /* Iterate through all of the installed flows.  If any of them are no
>       * longer desired, delete them; if any of them should have different
>       * actions, update them. */
> @@ -1012,6 +1118,54 @@ ofctrl_put(struct hmap *flow_table, struct shash
> *pending_ct_zones,
>          }
>      }
>
> +    /* Iterate through the installed meters from previous runs. If they
> +     * are not needed delete them. */
> +    struct meter_info *installed_meter, *next_meter;
> +    HMAP_FOR_EACH_SAFE(installed_meter, next_meter, hmap_node,
> +                       &meters->existing_meters) {
> +        if (!ovn_meter_lookup(&meters->desired_meters, installed_meter))
> {
> +            /* Delete the meter. */
> +            struct ofputil_meter_mod mm;
> +            enum ofputil_protocol usable_protocols;
> +            char *error;
> +            struct ds meter_string = DS_EMPTY_INITIALIZER;
> +            ds_put_format(&meter_string, "meter=%u",
> installed_meter->meter_id);
> +
> +            error = parse_ofp_meter_mod_str(&mm, ds_cstr(&meter_string),
> +                                            OFPMC13_DELETE,
> &usable_protocols);
> +            if (!error) {
> +                add_meter_mod(&mm, &msgs);
> +            } else {
> +                static struct vlog_rate_limit rl =
> VLOG_RATE_LIMIT_INIT(5, 1);
> +                VLOG_ERR_RL(&rl, "Error deleting meter %d: %s",
> +                         installed_meter->meter_id, error);
> +                free(error);
> +            }
> +            ds_destroy(&meter_string);
> +
> +            /* Remove 'installed_meter' from 'meters->existing_meters' */
> +            hmap_remove(&meters->existing_meters,
> &installed_meter->hmap_node);
> +            ds_destroy(&installed_meter->meter);
> +
> +            /* Dealloc meter_id. */
> +            bitmap_set0(meters->meter_ids, installed_meter->meter_id);
> +            free(installed_meter);
> +        }
> +    }
> +
> +    /* Move the contents of desired_meters to existing_meters. */
> +    HMAP_FOR_EACH_SAFE(desired_meter, next_meter, hmap_node,
> +                       &meters->desired_meters) {
> +        hmap_remove(&meters->desired_meters, &desired_meter->hmap_node);
> +        if (!ovn_meter_lookup(&meters->existing_meters, desired_meter)) {
> +            hmap_insert(&meters->existing_meters,
> &desired_meter->hmap_node,
> +                        desired_meter->hmap_node.hash);
> +        } else {
> +           ds_destroy(&desired_meter->meter);
> +           free(desired_meter);
> +        }
> +    }
> +
>      if (!ovs_list_is_empty(&msgs)) {
>          /* Add a barrier to the list of messages. */
>          struct ofpbuf *barrier = ofputil_encode_barrier_
> request(OFP13_VERSION);
> diff --git a/ovn/controller/ofctrl.h b/ovn/controller/ofctrl.h
> index d83f6aec4..e680e2d61 100644
> --- a/ovn/controller/ofctrl.h
> +++ b/ovn/controller/ofctrl.h
> @@ -24,6 +24,7 @@
>
>  struct controller_ctx;
>  struct group_table;
> +struct meter_table;
>  struct hmap;
>  struct match;
>  struct ofpbuf;
> @@ -31,7 +32,7 @@ struct ovsrec_bridge;
>  struct shash;
>
>  /* Interface for OVN main loop. */
> -void ofctrl_init(struct group_table *group_table);
> +void ofctrl_init(struct group_table *group_table, struct meter_table
> *meter_table);
>  enum mf_field_id ofctrl_run(const struct ovsrec_bridge *br_int,
>                              struct shash *pending_ct_zones);
>  bool ofctrl_can_put(void);
> @@ -58,4 +59,7 @@ void ofctrl_flow_table_clear(void);
>  void ovn_group_table_clear(struct group_table *group_table,
>                             bool existing);
>
> +void ovn_meter_table_clear(struct meter_table *meter_table,
> +                           bool existing);
> +
>  #endif /* ovn/ofctrl.h */
> diff --git a/ovn/controller/ovn-controller.c b/ovn/controller/ovn-
> controller.c
> index a935a791c..c5926bc83 100644
> --- a/ovn/controller/ovn-controller.c
> +++ b/ovn/controller/ovn-controller.c
> @@ -599,9 +599,16 @@ main(int argc, char *argv[])
>      hmap_init(&group_table.desired_groups);
>      hmap_init(&group_table.existing_groups);
>
> +    /* Initialize meter ids for QoS. */
> +    struct meter_table meter_table;
> +    meter_table.meter_ids = bitmap_allocate(MAX_OVN_METERS);
> +    bitmap_set1(meter_table.meter_ids, 0); /* Meter id 0 is invalid. */
> +    hmap_init(&meter_table.desired_meters);
> +    hmap_init(&meter_table.existing_meters);
> +
>      daemonize_complete();
>
> -    ofctrl_init(&group_table);
> +    ofctrl_init(&group_table, &meter_table);
>      pinctrl_init();
>      lflow_init();
>
> @@ -711,8 +718,8 @@ main(int argc, char *argv[])
>                      struct hmap flow_table =
> HMAP_INITIALIZER(&flow_table);
>                      lflow_run(&ctx, chassis,
>                                &chassis_index, &local_datapaths,
> &group_table,
> -                              &addr_sets, &flow_table, &active_tunnels,
> -                              &local_lport_ids);
> +                              &meter_table, &addr_sets, &flow_table,
> +                              &active_tunnels, &local_lport_ids);
>
>                      if (chassis_id) {
>                          bfd_run(&ctx, br_int, chassis, &local_datapaths,
> @@ -856,6 +863,18 @@ main(int argc, char *argv[])
>      }
>      hmap_destroy(&group_table.existing_groups);
>
> +    bitmap_free(meter_table.meter_ids);
> +    hmap_destroy(&meter_table.desired_meters);
> +
> +    struct meter_info *installed_meter, *next_meter;
> +    HMAP_FOR_EACH_SAFE(installed_meter, next_meter, hmap_node,
> +                       &meter_table.existing_meters) {
> +        hmap_remove(&meter_table.existing_meters,
> &installed_meter->hmap_node);
> +        ds_destroy(&installed_meter->meter);
> +        free(installed_meter);
> +    }
> +    hmap_destroy(&meter_table.existing_meters);
> +
>      ovsdb_idl_loop_destroy(&ovs_idl_loop);
>      ovsdb_idl_loop_destroy(&ovnsb_idl_loop);
>
> diff --git a/ovn/lib/actions.c b/ovn/lib/actions.c
> index d0d73b69c..9c1f5f963 100644
> --- a/ovn/lib/actions.c
> +++ b/ovn/lib/actions.c
> @@ -1873,6 +1873,118 @@ ovnact_log_free(struct ovnact_log *log)
>      free(log->name);
>  }
>
> +static void
> +parse_set_meter_action(struct action_context *ctx)
> +{
> +    int rate;
> +    int burst = 0;
> +
> +    if (ctx->pp->cur_ltable >= ctx->pp->n_tables) {
> +        lexer_error(ctx->lexer,
> +                    "\"set_meter\" action not allowed in last table.");
> +        return;
> +    }
> +
> +    lexer_force_match(ctx->lexer, LEX_T_LPAREN);
> +    lexer_force_int(ctx->lexer, &rate);
> +    if (lexer_match(ctx->lexer, LEX_T_COMMA)) {
> +        lexer_force_int(ctx->lexer, &burst);
> +    }
> +    lexer_force_match(ctx->lexer, LEX_T_RPAREN);
> +
> +    struct ovnact_set_meter *cl = ovnact_put_SET_METER(ctx->ovnacts);
> +    cl->rate = (uint32_t)rate;
> +    cl->burst = (uint32_t)burst;
> +}
> +
> +static void
> +format_SET_METER(const struct ovnact_set_meter *cl, struct ds *s)
> +{
> +    if (cl->burst) {
> +        ds_put_format(s, "set_meter(%d ,%d);", cl->rate, cl->burst);
> +    } else {
> +        ds_put_format(s, "set_meter(%d);", cl->rate);
> +    }
> +}
> +
> +static void
> +encode_SET_METER(const struct ovnact_set_meter *cl,
> +                 const struct ovnact_encode_params *ep,
> +                 struct ofpbuf *ofpacts)
> +{
> +    uint32_t meter_id = 0, hash;
> +    struct meter_info *meter_info;
> +    struct ofpact_meter *om;
> +
> +    struct ds ds = DS_EMPTY_INITIALIZER;
> +    if (cl->burst) {
> +        ds_put_format(&ds,
> +                      "kbps burst stats bands=type=drop rate=%d
> burst_size=%d",
> +                      cl->rate, cl->burst);
> +    } else {
> +        ds_put_format(&ds, "kbps stats bands=type=drop rate=%d",
> cl->rate);
> +    }
> +
> +    hash = hash_string(ds_cstr(&ds), 0);
> +
> +    /* Check whether we have non installed but allocated meter_id. */
> +    HMAP_FOR_EACH_WITH_HASH (meter_info, hmap_node, hash,
> +                             &ep->meter_table->desired_meters) {
> +        if (!strcmp(ds_cstr(&meter_info->meter), ds_cstr(&ds))) {
> +            meter_id = meter_info->meter_id;
> +            break;
> +        }
> +    }
> +
> +    if (!meter_id) {
> +        /* Check whether we already have an installed entry for this
> +         * combination. */
> +        HMAP_FOR_EACH_WITH_HASH (meter_info, hmap_node, hash,
> +                                 &ep->meter_table->existing_meters) {
> +            if (!strcmp(ds_cstr(&meter_info->meter), ds_cstr(&ds))) {
> +                meter_id = meter_info->meter_id;
> +            }
> +        }
> +
> +        bool new_meter_id = false;
> +        if (!meter_id) {
> +            /* Reserve a new meter_id. */
> +            meter_id = bitmap_scan(ep->meter_table->meter_ids, 0, 1,
> +                                   MAX_OVN_METERS + 1);
> +            new_meter_id = true;
> +        }
> +
> +        if (meter_id == MAX_OVN_METERS + 1) {
> +            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
> +            VLOG_ERR_RL(&rl, "out of meter ids");
> +
> +            ds_destroy(&ds);
> +            return;
> +        }
> +        bitmap_set1(ep->meter_table->meter_ids, meter_id);
> +
> +        meter_info = xmalloc(sizeof *meter_info);
> +        meter_info->meter = ds;
> +        meter_info->meter_id = meter_id;
> +        meter_info->hmap_node.hash = hash;
> +        meter_info->new_meter_id = new_meter_id;
> +
> +        hmap_insert(&ep->meter_table->desired_meters,
> +                    &meter_info->hmap_node, meter_info->hmap_node.hash);
> +    } else {
> +        ds_destroy(&ds);
> +    }
> +
> +    /* Create an action to set the meter. */
> +    om = ofpact_put_METER(ofpacts);
> +    om->meter_id = meter_id;
> +}
> +
> +static void
> +ovnact_set_meter_free(struct ovnact_set_meter *ct OVS_UNUSED)
> +{
> +}
> +
>  /* Parses an assignment or exchange or put_dhcp_opts action. */
>  static void
>  parse_set_action(struct action_context *ctx)
> @@ -1954,6 +2066,8 @@ parse_action(struct action_context *ctx)
>          parse_SET_QUEUE(ctx);
>      } else if (lexer_match_id(ctx->lexer, "log")) {
>          parse_LOG(ctx);
> +    } else if (lexer_match_id(ctx->lexer, "set_meter")) {
> +        parse_set_meter_action(ctx);
>      } else {
>          lexer_syntax_error(ctx->lexer, "expecting action");
>      }
> diff --git a/ovn/northd/ovn-northd.8.xml b/ovn/northd/ovn-northd.8.xml
> index 0d85ec0d2..5a2febb27 100644
> --- a/ovn/northd/ovn-northd.8.xml
> +++ b/ovn/northd/ovn-northd.8.xml
> @@ -364,7 +364,28 @@
>        </li>
>      </ul>
>
> -    <h3>Ingress Table 8: LB</h3>
> +    <h3>Ingress Table 8: <code>from-lport</code> QoS meter</h3>
> +
> +    <p>
> +      Logical flows in this table closely reproduce those in the
> +      <code>QoS</code> table <code>bandwidth</code> column in the
> +      <code>OVN_Northbound</code> database for the <code>from-lport</code>
> +      direction.
> +    </p>
> +
> +    <ul>
> +      <li>
> +        For every row in the <code>QoS</code> table of every logical
> +        switch, a flow is added at the priority specified in that row.
> +      </li>
> +
> +      <li>
> +        One priority-0 fallback flow that matches all packets and
> advances to
> +        the next table.
> +      </li>
> +    </ul>
> +
> +    <h3>Ingress Table 9: LB</h3>
>
>      <p>
>        It contains a priority-0 flow that simply moves traffic to the next
> @@ -377,7 +398,7 @@
>        connection.)
>      </p>
>
> -    <h3>Ingress Table 9: Stateful</h3>
> +    <h3>Ingress Table 10: Stateful</h3>
>
>      <ul>
>        <li>
> @@ -414,7 +435,7 @@
>        </li>
>      </ul>
>
> -    <h3>Ingress Table 10: ARP/ND responder</h3>
> +    <h3>Ingress Table 11: ARP/ND responder</h3>
>
>      <p>
>        This table implements ARP/ND responder in a logical switch for known
> @@ -564,7 +585,7 @@ nd_na {
>        </li>
>      </ul>
>
> -    <h3>Ingress Table 11: DHCP option processing</h3>
> +    <h3>Ingress Table 12: DHCP option processing</h3>
>
>      <p>
>        This table adds the DHCPv4 options to a DHCPv4 packet from the
> @@ -624,7 +645,7 @@ next;
>        </li>
>      </ul>
>
> -    <h3>Ingress Table 12: DHCP responses</h3>
> +    <h3>Ingress Table 13: DHCP responses</h3>
>
>      <p>
>        This table implements DHCP responder for the DHCP replies generated
> by
> @@ -706,7 +727,7 @@ output;
>        </li>
>      </ul>
>
> -    <h3>Ingress Table 13 DNS Lookup</h3>
> +    <h3>Ingress Table 14 DNS Lookup</h3>
>
>      <p>
>        This table looks up and resolves the DNS names to the corresponding
> @@ -735,7 +756,7 @@ reg0[4] = dns_lookup(); next;
>        </li>
>      </ul>
>
> -    <h3>Ingress Table 14 DNS Responses</h3>
> +    <h3>Ingress Table 15 DNS Responses</h3>
>
>      <p>
>        This table implements DNS responder for the DNS replies generated by
> @@ -770,7 +791,7 @@ output;
>        </li>
>      </ul>
>
> -    <h3>Ingress Table 15 Destination Lookup</h3>
> +    <h3>Ingress Table 16 Destination Lookup</h3>
>
>      <p>
>        This table implements switching behavior.  It contains these logical
> @@ -872,7 +893,14 @@ output;
>        <code>to-lport</code> qos rules.
>      </p>
>
> -    <h3>Egress Table 6: Stateful</h3>
> +    <h3>Egress Table 6: <code>to-lport</code> QoS meter</h3>
> +
> +    <p>
> +      This is similar to ingress table <code>QoS meter</code> except for
> +      <code>to-lport</code> qos rules.
> +    </p>
> +
> +    <h3>Egress Table 7: Stateful</h3>
>
>      <p>
>        This is similar to ingress table <code>Stateful</code> except that
> @@ -887,18 +915,18 @@ output;
>          A priority 34000 logical flow is added for each logical port which
>          has DHCPv4 options defined to allow the DHCPv4 reply packet and
> which has
>          DHCPv6 options defined to allow the DHCPv6 reply packet from the
> -        <code>Ingress Table 12: DHCP responses</code>.
> +        <code>Ingress Table 13: DHCP responses</code>.
>        </li>
>
>        <li>
>          A priority 34000 logical flow is added for each logical switch
> datapath
>          configured with DNS records with the match <code>udp.dst =
> 53</code>
>          to allow the DNS reply packet from the
> -        <code>Ingress Table 14:DNS responses</code>.
> +        <code>Ingress Table 15:DNS responses</code>.
>        </li>
>      </ul>
>
> -    <h3>Egress Table 7: Egress Port Security - IP</h3>
> +    <h3>Egress Table 8: Egress Port Security - IP</h3>
>
>      <p>
>        This is similar to the port security logic in table
> @@ -908,7 +936,7 @@ output;
>        <code>ip4.src</code> and <code>ip6.src</code>
>      </p>
>
> -    <h3>Egress Table 8: Egress Port Security - L2</h3>
> +    <h3>Egress Table 9: Egress Port Security - L2</h3>
>
>      <p>
>        This is similar to the ingress port security logic in ingress table
> diff --git a/ovn/northd/ovn-northd.c b/ovn/northd/ovn-northd.c
> index 2db238073..4c0d6fcd0 100644
> --- a/ovn/northd/ovn-northd.c
> +++ b/ovn/northd/ovn-northd.c
> @@ -108,25 +108,27 @@ enum ovn_stage {
>      PIPELINE_STAGE(SWITCH, IN,  PRE_STATEFUL,   5, "ls_in_pre_stateful")
> \
>      PIPELINE_STAGE(SWITCH, IN,  ACL,            6, "ls_in_acl")
>  \
>      PIPELINE_STAGE(SWITCH, IN,  QOS_MARK,       7, "ls_in_qos_mark")
> \
> -    PIPELINE_STAGE(SWITCH, IN,  LB,             8, "ls_in_lb")
> \
> -    PIPELINE_STAGE(SWITCH, IN,  STATEFUL,       9, "ls_in_stateful")
> \
> -    PIPELINE_STAGE(SWITCH, IN,  ARP_ND_RSP,    10, "ls_in_arp_rsp")
>  \
> -    PIPELINE_STAGE(SWITCH, IN,  DHCP_OPTIONS,  11, "ls_in_dhcp_options")
> \
> -    PIPELINE_STAGE(SWITCH, IN,  DHCP_RESPONSE, 12, "ls_in_dhcp_response")
> \
> -    PIPELINE_STAGE(SWITCH, IN,  DNS_LOOKUP,      13, "ls_in_dns_lookup") \
> -    PIPELINE_STAGE(SWITCH, IN,  DNS_RESPONSE,  14, "ls_in_dns_response") \
> -    PIPELINE_STAGE(SWITCH, IN,  L2_LKUP,       15, "ls_in_l2_lkup")
>  \
> -                                                                      \
> -    /* Logical switch egress stages. */                               \
> -    PIPELINE_STAGE(SWITCH, OUT, PRE_LB,       0, "ls_out_pre_lb")     \
> -    PIPELINE_STAGE(SWITCH, OUT, PRE_ACL,      1, "ls_out_pre_acl")     \
> -    PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2, "ls_out_pre_stateful")  \
> -    PIPELINE_STAGE(SWITCH, OUT, LB,           3, "ls_out_lb")            \
> +    PIPELINE_STAGE(SWITCH, IN,  QOS_METER,      8, "ls_in_qos_meter")
>  \
> +    PIPELINE_STAGE(SWITCH, IN,  LB,             9, "ls_in_lb")
> \
> +    PIPELINE_STAGE(SWITCH, IN,  STATEFUL,      10, "ls_in_stateful")
> \
> +    PIPELINE_STAGE(SWITCH, IN,  ARP_ND_RSP,    11, "ls_in_arp_rsp")
>  \
> +    PIPELINE_STAGE(SWITCH, IN,  DHCP_OPTIONS,  12, "ls_in_dhcp_options")
> \
> +    PIPELINE_STAGE(SWITCH, IN,  DHCP_RESPONSE, 13, "ls_in_dhcp_response")
> \
> +    PIPELINE_STAGE(SWITCH, IN,  DNS_LOOKUP,    14, "ls_in_dns_lookup")
> \
> +    PIPELINE_STAGE(SWITCH, IN,  DNS_RESPONSE,  15, "ls_in_dns_response")
> \
> +    PIPELINE_STAGE(SWITCH, IN,  L2_LKUP,       16, "ls_in_l2_lkup")
>  \
> +
> \
> +    /* Logical switch egress stages. */
>  \
> +    PIPELINE_STAGE(SWITCH, OUT, PRE_LB,       0, "ls_out_pre_lb")
>  \
> +    PIPELINE_STAGE(SWITCH, OUT, PRE_ACL,      1, "ls_out_pre_acl")
> \
> +    PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2, "ls_out_pre_stateful")
>  \
> +    PIPELINE_STAGE(SWITCH, OUT, LB,           3, "ls_out_lb")
>  \
>      PIPELINE_STAGE(SWITCH, OUT, ACL,          4, "ls_out_acl")
> \
>      PIPELINE_STAGE(SWITCH, OUT, QOS_MARK,     5, "ls_out_qos_mark")
>  \
> -    PIPELINE_STAGE(SWITCH, OUT, STATEFUL,     6, "ls_out_stateful")
>  \
> -    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP,  7, "ls_out_port_sec_ip")
> \
> -    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2,  8, "ls_out_port_sec_l2")
> \
> +    PIPELINE_STAGE(SWITCH, OUT, QOS_METER,    6, "ls_out_qos_meter")
> \
> +    PIPELINE_STAGE(SWITCH, OUT, STATEFUL,     7, "ls_out_stateful")
>  \
> +    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP,  8, "ls_out_port_sec_ip")
> \
> +    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2,  9, "ls_out_port_sec_l2")
> \
>                                                                        \
>      /* Logical router ingress stages. */                              \
>      PIPELINE_STAGE(ROUTER, IN,  ADMISSION,   0, "lr_in_admission")    \
> @@ -3365,21 +3367,49 @@ static void
>  build_qos(struct ovn_datapath *od, struct hmap *lflows) {
>      ovn_lflow_add(lflows, od, S_SWITCH_IN_QOS_MARK, 0, "1", "next;");
>      ovn_lflow_add(lflows, od, S_SWITCH_OUT_QOS_MARK, 0, "1", "next;");
> +    ovn_lflow_add(lflows, od, S_SWITCH_IN_QOS_METER, 0, "1", "next;");
> +    ovn_lflow_add(lflows, od, S_SWITCH_OUT_QOS_METER, 0, "1", "next;");
>
>      for (size_t i = 0; i < od->nbs->n_qos_rules; i++) {
>          struct nbrec_qos *qos = od->nbs->qos_rules[i];
>          bool ingress = !strcmp(qos->direction, "from-lport") ? true
> :false;
>          enum ovn_stage stage = ingress ? S_SWITCH_IN_QOS_MARK :
> S_SWITCH_OUT_QOS_MARK;
> +        uint32_t rate = 0;
> +        uint32_t burst = 0;
> +
> +        for (size_t j = 0; j < qos->n_action; j++) {
> +            if (!strcmp(qos->key_action[j], "dscp")) {
> +                struct ds dscp_action = DS_EMPTY_INITIALIZER;
> +
> +                ds_put_format(&dscp_action, "ip.dscp = %d; next;",
> +                              (uint8_t)qos->value_action[j]);
> +                ovn_lflow_add(lflows, od, stage,
> +                              qos->priority,
> +                              qos->match, ds_cstr(&dscp_action));
> +                ds_destroy(&dscp_action);
> +            }
> +        }
>
> -        if (!strcmp(qos->key_action, "dscp")) {
> -            struct ds dscp_action = DS_EMPTY_INITIALIZER;
> -
> -            ds_put_format(&dscp_action, "ip.dscp = %d; next;",
> -                          (uint8_t)qos->value_action);
> +        for (size_t n = 0; n < qos->n_bandwidth; n++) {
> +            if (!strcmp(qos->key_bandwidth[n], "rate")) {
> +                rate = (uint32_t)qos->value_bandwidth[n];
> +            } else if (!strcmp(qos->key_bandwidth[n], "burst")) {
> +                burst = (uint32_t)qos->value_bandwidth[n];
> +            }
> +        }
> +        if (rate) {
> +            struct ds meter_action = DS_EMPTY_INITIALIZER;
> +            stage = ingress ? S_SWITCH_IN_QOS_METER :
> S_SWITCH_OUT_QOS_METER;
> +            if (burst) {
> +                ds_put_format(&meter_action, "set_meter(%d, %d); next;",
> +                              rate, burst);
> +            } else {
> +                ds_put_format(&meter_action, "set_meter(%d); next;",
> rate);
> +            }
>              ovn_lflow_add(lflows, od, stage,
>                            qos->priority,
> -                          qos->match, ds_cstr(&dscp_action));
> -            ds_destroy(&dscp_action);
> +                          qos->match, ds_cstr(&meter_action));
> +            ds_destroy(&meter_action);
>          }
>      }
>  }
> @@ -3489,7 +3519,7 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>      struct ds actions = DS_EMPTY_INITIALIZER;
>
>      /* Build pre-ACL and ACL tables for both ingress and egress.
> -     * Ingress tables 3 through 9.  Egress tables 0 through 6. */
> +     * Ingress tables 3 through 10.  Egress tables 0 through 7. */
>      struct ovn_datapath *od;
>      HMAP_FOR_EACH (od, key_node, datapaths) {
>          if (!od->nbs) {
> @@ -3572,7 +3602,7 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>          ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_IP, 0, "1",
> "next;");
>      }
>
> -    /* Ingress table 10: ARP/ND responder, skip requests coming from
> localnet
> +    /* Ingress table 11: ARP/ND responder, skip requests coming from
> localnet
>       * and vtep ports. (priority 100); see ovn-northd.8.xml for the
>       * rationale. */
>      HMAP_FOR_EACH (op, key_node, ports) {
> @@ -3589,7 +3619,7 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>          }
>      }
>
> -    /* Ingress table 10: ARP/ND responder, reply for known IPs.
> +    /* Ingress table 11: ARP/ND responder, reply for known IPs.
>       * (priority 50). */
>      HMAP_FOR_EACH (op, key_node, ports) {
>          if (!op->nbsp) {
> @@ -3684,7 +3714,7 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>          }
>      }
>
> -    /* Ingress table 10: ARP/ND responder, by default goto next.
> +    /* Ingress table 11: ARP/ND responder, by default goto next.
>       * (priority 0)*/
>      HMAP_FOR_EACH (od, key_node, datapaths) {
>          if (!od->nbs) {
> @@ -3694,7 +3724,7 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>          ovn_lflow_add(lflows, od, S_SWITCH_IN_ARP_ND_RSP, 0, "1",
> "next;");
>      }
>
> -    /* Logical switch ingress table 11 and 12: DHCP options and response
> +    /* Logical switch ingress table 12 and 13: DHCP options and response
>           * priority 100 flows. */
>      HMAP_FOR_EACH (op, key_node, ports) {
>          if (!op->nbsp) {
> @@ -3796,7 +3826,7 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>          }
>      }
>
> -    /* Logical switch ingress table 13 and 14: DNS lookup and response
> +    /* Logical switch ingress table 14 and 15: DNS lookup and response
>       * priority 100 flows.
>       */
>      HMAP_FOR_EACH (od, key_node, datapaths) {
> @@ -3828,9 +3858,9 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>          ds_destroy(&action);
>      }
>
> -    /* Ingress table 11 and 12: DHCP options and response, by default
> goto next.
> +    /* Ingress table 12 and 13: DHCP options and response, by default
> goto next.
>       * (priority 0).
> -     * Ingress table 13 and 14: DNS lookup and response, by default goto
> next.
> +     * Ingress table 14 and 15: DNS lookup and response, by default goto
> next.
>       * (priority 0).*/
>
>      HMAP_FOR_EACH (od, key_node, datapaths) {
> @@ -3844,7 +3874,7 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>          ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 0, "1",
> "next;");
>      }
>
> -    /* Ingress table 15: Destination lookup, broadcast and multicast
> handling
> +    /* Ingress table 16: Destination lookup, broadcast and multicast
> handling
>       * (priority 100). */
>      HMAP_FOR_EACH (op, key_node, ports) {
>          if (!op->nbsp) {
> @@ -3864,7 +3894,7 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>                        "outport = \""MC_FLOOD"\"; output;");
>      }
>
> -    /* Ingress table 13: Destination lookup, unicast handling (priority
> 50), */
> +    /* Ingress table 16: Destination lookup, unicast handling (priority
> 50), */
>      HMAP_FOR_EACH (op, key_node, ports) {
>          if (!op->nbsp) {
>              continue;
> @@ -3964,7 +3994,7 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>          }
>      }
>
> -    /* Ingress table 13: Destination lookup for unknown MACs (priority
> 0). */
> +    /* Ingress table 16: Destination lookup for unknown MACs (priority
> 0). */
>      HMAP_FOR_EACH (od, key_node, datapaths) {
>          if (!od->nbs) {
>              continue;
> @@ -3976,8 +4006,8 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>          }
>      }
>
> -    /* Egress tables 6: Egress port security - IP (priority 0)
> -     * Egress table 7: Egress port security L2 - multicast/broadcast
> +    /* Egress tables 8: Egress port security - IP (priority 0)
> +     * Egress table 9: Egress port security L2 - multicast/broadcast
>       *                 (priority 100). */
>      HMAP_FOR_EACH (od, key_node, datapaths) {
>          if (!od->nbs) {
> @@ -3989,10 +4019,10 @@ build_lswitch_flows(struct hmap *datapaths, struct
> hmap *ports,
>                        "output;");
>      }
>
> -    /* Egress table 6: Egress port security - IP (priorities 90 and 80)
> +    /* Egress table 8: Egress port security - IP (priorities 90 and 80)
>       * if port security enabled.
>       *
> -     * Egress table 7: Egress port security - L2 (priorities 50 and 150).
> +     * Egress table 9: Egress port security - L2 (priorities 50 and 150).
>       *
>       * Priority 50 rules implement port security for enabled logical port.
>       *
> diff --git a/ovn/ovn-nb.ovsschema b/ovn/ovn-nb.ovsschema
> index a077bfb81..23ab008cd 100644
> --- a/ovn/ovn-nb.ovsschema
> +++ b/ovn/ovn-nb.ovsschema
> @@ -1,7 +1,7 @@
>  {
>      "name": "OVN_Northbound",
>      "version": "5.8.0",
> -    "cksum": "2812300190 16766",
> +    "cksum": "3886656843 17257",
>      "tables": {
>          "NB_Global": {
>              "columns": {
> @@ -164,7 +164,14 @@
>                                              "enum": ["set", ["dscp"]]},
>                                      "value": {"type": "integer",
>                                                "minInteger": 0,
> -                                              "maxInteger": 63}}},
> +                                              "maxInteger": 63},
> +                                    "min": 0, "max": "unlimited"}},
> +                "bandwidth": {"type": {"key": {"type": "string",
> +                                               "enum": ["set", ["rate",
> "burst"]]},
> +                                       "value": {"type": "integer",
> +                                                 "minInteger": 1,
> +                                                 "maxInteger": 65535},
> +                                       "min": 0, "max": "unlimited"}},
>                  "external_ids": {
>                      "type": {"key": "string", "value": "string",
>                               "min": 0, "max": "unlimited"}}},
> diff --git a/ovn/ovn-nb.xml b/ovn/ovn-nb.xml
> index 9869d7ed7..c666ec2c4 100644
> --- a/ovn/ovn-nb.xml
> +++ b/ovn/ovn-nb.xml
> @@ -1250,6 +1250,22 @@
>        </ul>
>      </column>
>
> +    <column name="bandwidth">
> +      <p>
> +         The bandwidth limit to be performed on the matched packet.
> +         Currently only supported in the userspace by dpdk.
> +      </p>
> +      <ul>
> +        <li>
> +          <code>rate</code>: The value of rate limit.
> +        </li>
> +        <li>
> +          <code>burst</code>: The value of burst rate limit. This is
> optional
> +          and needs to specify the <code>rate</code> first.
> +        </li>
> +      </ul>
> +    </column>
> +
>      <column name="external_ids">
>        See <em>External IDs</em> at the beginning of this document.
>      </column>
> diff --git a/ovn/ovn-sb.xml b/ovn/ovn-sb.xml
> index 0a894f8cb..ee1db3f6f 100644
> --- a/ovn/ovn-sb.xml
> +++ b/ovn/ovn-sb.xml
> @@ -1516,6 +1516,21 @@
>              <b>Prerequisite:</b> <code>udp</code>
>            </p>
>          </dd>
> +
> +        <dt><code>set_meter(<var>rate</var>);</code></dt>
> +        <dt><code>set_meter(<var>rate</var>,
> <var>burst</var>);</code></dt>
> +        <dd>
> +          <p>
> +            <b>Parameters</b>: rate limit int field <var>rate</var>,
> burst rate limits
> +            int field <var>burst</var>.
> +          </p>
> +
> +          <p>
> +            This action sets the rate limit for a flow.
> +          </p>
> +
> +          <p><b>Example:</b> <code>set_meter(100, 1000);</code></p>
> +        </dd>
>        </dl>
>
>        <dl>
> diff --git a/ovn/utilities/ovn-trace.c b/ovn/utilities/ovn-trace.c
> index 59083eebe..bb5593876 100644
> --- a/ovn/utilities/ovn-trace.c
> +++ b/ovn/utilities/ovn-trace.c
> @@ -1833,6 +1833,10 @@ trace_actions(const struct ovnact *ovnacts, size_t
> ovnacts_len,
>          case OVNACT_LOG:
>              execute_log(ovnact_get_LOG(a), uflow, super);
>              break;
> +
> +        case OVNACT_SET_METER:
> +            /* Nothing to do. */
> +            break;
>          }
>
>      }
> diff --git a/tests/ovn.at b/tests/ovn.at
> index 6c38b973f..0e34f7df4 100644
> --- a/tests/ovn.at
> +++ b/tests/ovn.at
> @@ -5885,7 +5885,7 @@ OVN_CLEANUP([hv])
>  AT_CLEANUP
>
>
> -AT_SETUP([ovn -- DSCP marking check])
> +AT_SETUP([ovn -- DSCP marking and meter check])
>  AT_KEYWORDS([ovn])
>  ovn_start
>
> @@ -5952,13 +5952,16 @@ AT_CHECK([get_final_nw_tos], [0], [none
>  check_tos 0
>
>  # Mark DSCP with a valid value
> -qos_id=$(ovn-nbctl --wait=hv -- --id=@lp1-qos create QoS priority=100
> action=dscp=48 match="inport\=\=\"lp1\"" direction="from-lport" -- set
> Logical_Switch lsw0 qos_rules=@lp1-qos)
> +qos_id=$(ovn-nbctl --wait=hv -- --id=@lp1-qos create QoS priority=100
> action=dscp=48 bandwidth=rate=100,burst=1000 match="inport\=\=\"lp1\""
> direction="from-lport" -- set Logical_Switch lsw0 qos_rules=@lp1-qos)
>  check_tos 48
>
>  # Update the DSCP marking
>  ovn-nbctl --wait=hv set QoS $qos_id action=dscp=63
>  check_tos 63
>
> +# Update the meter rate
> +ovn-nbctl --wait=hv set QoS $qos_id bandwidth=rate=65535,burst=65535
> +
>  ovn-nbctl --wait=hv set QoS $qos_id match="outport\=\=\"lp2\""
> direction="to-lport"
>  check_tos 63
>
> diff --git a/tests/test-ovn.c b/tests/test-ovn.c
> index 4beb2b8d6..e9dcba231 100644
> --- a/tests/test-ovn.c
> +++ b/tests/test-ovn.c
> @@ -1206,6 +1206,13 @@ test_parse_actions(struct ovs_cmdl_context *ctx
> OVS_UNUSED)
>      hmap_init(&group_table.desired_groups);
>      hmap_init(&group_table.existing_groups);
>
> +    /* Initialize meter ids for QoS. */
> +    struct meter_table meter_table;
> +    meter_table.meter_ids = bitmap_allocate(MAX_OVN_METERS);
> +    bitmap_set1(meter_table.meter_ids, 0); /* Meter id 0 is invalid. */
> +    hmap_init(&meter_table.desired_meters);
> +    hmap_init(&meter_table.existing_meters);
> +
>      simap_init(&ports);
>      simap_put(&ports, "eth0", 5);
>      simap_put(&ports, "eth1", 6);
> @@ -1244,6 +1251,7 @@ test_parse_actions(struct ovs_cmdl_context *ctx
> OVS_UNUSED)
>                  .aux = &ports,
>                  .is_switch = true,
>                  .group_table = &group_table,
> +                .meter_table = &meter_table,
>
>                  .pipeline = OVNACT_P_INGRESS,
>                  .ingress_ptable = 8,
> --
> 2.13.2.windows.1
>
> _______________________________________________
> dev mailing list
> dev@openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
>
Guoshuai Li Sept. 21, 2017, 2:40 p.m. UTC | #2
The userspace switch meter implementation has been added to master and to the 2.8 branch:

https://github.com/openvswitch/ovs/blob/master/lib/dpif-netdev.c#L4262

But the kernel meter implementation is not present yet:

https://github.com/openvswitch/ovs/blob/master/lib/dpif-netlink.c#L2907



On 2017/9/21 19:03, Miguel Angel Ajo Pelayo wrote:
> I thought we didn't have meters yet in OvS switch implementation 
> (beyond openflow protocol support) as per:
>
> http://docs.openvswitch.org/en/latest/faq/qos/
>
> Has this changed in master?
>
> On Wed, Sep 20, 2017 at 1:52 PM, Guoshuai Li <ligs@dtdream.com 
> <mailto:ligs@dtdream.com>> wrote:
>
>     ovn-northd modify:
>     add bandwidth column in NB's QOS table.
>     add QOS_METER stages in Logical switch ingress/egress.
>     add set_meter() action in SB's LFlow table.
>
>     ovn-controller modify:
>     add meter_table for meter action process openflow meter table.
>
>     This feature is only supported in DPDK.
>     ---
>
>     v2: Fix Ingress/Egress Table id error.
>
>     ---
>      NEWS                            |   1 +
>      include/ovn/actions.h           |  31 +++++++-
>      ovn/controller/lflow.c          |   9 ++-
>      ovn/controller/lflow.h          |   2 +
>      ovn/controller/ofctrl.c         | 158
>     +++++++++++++++++++++++++++++++++++++++-
>      ovn/controller/ofctrl.h         |   6 +-
>      ovn/controller/ovn-controller.c |  25 ++++++-
>      ovn/lib/actions.c               | 114 +++++++++++++++++++++++++++++
>      ovn/northd/ovn-northd.8.xml     |  54 ++++++++++----
>      ovn/northd/ovn-northd.c         | 108 +++++++++++++++++----------
>      ovn/ovn-nb.ovsschema            |  11 ++-
>      ovn/ovn-nb.xml                  |  16 ++++
>      ovn/ovn-sb.xml                  |  15 ++++
>      ovn/utilities/ovn-trace.c       |   4 +
>      tests/ovn.at <http://ovn.at>           |   7 +-
>      tests/test-ovn.c                |   8 ++
>      16 files changed, 504 insertions(+), 65 deletions(-)
>
>     diff --git a/NEWS b/NEWS
>     index 6a5d2bf98..b97e7bff7 100644
>     --- a/NEWS
>     +++ b/NEWS
>     @@ -59,6 +59,7 @@ v2.8.0 - xx xxx xxxx
>             gateway.
>           * Add support for ACL logging.
>           * ovn-northd now has native support for active-standby high
>     availability.
>     +     * Add support for QoS bandwidth limt with DPDK.
>         - Tracing with ofproto/trace now traces through recirculation.
>         - OVSDB:
>           * New support for role-based access control (see
>     ovsdb-server(1)).
>     diff --git a/include/ovn/actions.h b/include/ovn/actions.h
>     index 0a04af7aa..8dbb895f3 100644
>     --- a/include/ovn/actions.h
>     +++ b/include/ovn/actions.h
>     @@ -72,7 +72,8 @@ struct simap;
>          OVNACT(PUT_DHCPV6_OPTS,   ovnact_put_dhcp_opts)   \
>          OVNACT(SET_QUEUE,         ovnact_set_queue)       \
>          OVNACT(DNS_LOOKUP,        ovnact_dns_lookup)      \
>     -    OVNACT(LOG,               ovnact_log)
>     +    OVNACT(LOG,               ovnact_log)             \
>     +    OVNACT(SET_METER,         ovnact_set_meter)
>
>      /* enum ovnact_type, with a member OVNACT_<ENUM> for each action. */
>      enum OVS_PACKED_ENUM ovnact_type {
>     @@ -274,6 +275,13 @@ struct ovnact_log {
>          char *name;
>      };
>
>     +/* OVNACT_SET_METER. */
>     +struct ovnact_set_meter {
>     +    struct ovnact ovnact;
>     +    uint32_t rate;              /* 32-bit rate field. */
>     +    uint32_t burst;             /* 32-bit burst rate field. */
>     +};
>     +
>      /* Internal use by the helpers below. */
>      void ovnact_init(struct ovnact *, enum ovnact_type, size_t len);
>      void *ovnact_put(struct ofpbuf *, enum ovnact_type, size_t len);
>     @@ -350,6 +358,24 @@ struct group_info {
>                               * group_table's 'group_ids' bitmap. */
>      };
>
>     +#define MAX_OVN_METERS 65535
>     +
>     +struct meter_table {
>     +    unsigned long *meter_ids;  /* Used as a bitmap with value set
>     +                                * for allocated meter ids in either
>     +                                * desired_meters or
>     existing_meters. */
>     +    struct hmap desired_meters;
>     +    struct hmap existing_meters;
>     +};
>     +
>     +struct meter_info {
>     +    struct hmap_node hmap_node;
>     +    struct ds meter;
>     +    uint32_t meter_id;
>     +    bool new_meter_id;  /* 'True' if 'meter_id' was reserved from
>     +                         * meter_table's 'meter_ids' bitmap. */
>     +};
>     +
>      enum action_opcode {
>          /* "arp { ...actions... }".
>           *
>     @@ -484,6 +510,9 @@ struct ovnact_encode_params {
>          /* A struct to figure out the group_id for group actions. */
>          struct group_table *group_table;
>
>     +    /* A struct to figure out the meter_id for meter actions. */
>     +    struct meter_table *meter_table;
>     +
>          /* OVN maps each logical flow table (ltable), one-to-one,
>     onto a physical
>           * OpenFlow flow table (ptable).  A number of parameters
>     describe this
>           * mapping and data related to flow tables:
>     diff --git a/ovn/controller/lflow.c b/ovn/controller/lflow.c
>     index 20a18c259..bf7a59f32 100644
>     --- a/ovn/controller/lflow.c
>     +++ b/ovn/controller/lflow.c
>     @@ -62,6 +62,7 @@ static void consider_logical_flow(struct
>     controller_ctx *ctx,
>                                        const struct sbrec_logical_flow
>     *lflow,
>                                        const struct hmap *local_datapaths,
>                                        struct group_table *group_table,
>     +                                  struct meter_table *meter_table,
>                                        const struct sbrec_chassis
>     *chassis,
>                                        struct hmap *dhcp_opts,
>                                        struct hmap *dhcpv6_opts,
>     @@ -143,6 +144,7 @@ add_logical_flows(struct controller_ctx *ctx,
>                        const struct chassis_index *chassis_index,
>                        const struct hmap *local_datapaths,
>                        struct group_table *group_table,
>     +                  struct meter_table *meter_table,
>                        const struct sbrec_chassis *chassis,
>                        const struct shash *addr_sets,
>                        struct hmap *flow_table,
>     @@ -170,7 +172,7 @@ add_logical_flows(struct controller_ctx *ctx,
>          SBREC_LOGICAL_FLOW_FOR_EACH (lflow, ctx->ovnsb_idl) {
>              consider_logical_flow(ctx, chassis_index,
>                                    lflow, local_datapaths,
>     -                              group_table, chassis,
>     +                              group_table, meter_table, chassis,
>                                    &dhcp_opts, &dhcpv6_opts, &conj_id_ofs,
>                                    addr_sets, flow_table, active_tunnels,
>                                    local_lport_ids);
>     @@ -186,6 +188,7 @@ consider_logical_flow(struct controller_ctx *ctx,
>                            const struct sbrec_logical_flow *lflow,
>                            const struct hmap *local_datapaths,
>                            struct group_table *group_table,
>     +                      struct meter_table *meter_table,
>                            const struct sbrec_chassis *chassis,
>                            struct hmap *dhcp_opts,
>                            struct hmap *dhcpv6_opts,
>     @@ -256,6 +259,7 @@ consider_logical_flow(struct controller_ctx *ctx,
>              .is_switch = is_switch(ldp),
>              .is_gateway_router = is_gateway_router(ldp, local_datapaths),
>              .group_table = group_table,
>     +        .meter_table = meter_table,
>
>              .pipeline = ingress ? OVNACT_P_INGRESS : OVNACT_P_EGRESS,
>              .ingress_ptable = OFTABLE_LOG_INGRESS_PIPELINE,
>     @@ -428,13 +432,14 @@ lflow_run(struct controller_ctx *ctx,
>                const struct chassis_index *chassis_index,
>                const struct hmap *local_datapaths,
>                struct group_table *group_table,
>     +          struct meter_table *meter_table,
>                const struct shash *addr_sets,
>                struct hmap *flow_table,
>                struct sset *active_tunnels,
>                struct sset *local_lport_ids)
>      {
>          add_logical_flows(ctx, chassis_index, local_datapaths,
>     -                      group_table, chassis, addr_sets, flow_table,
>     +                      group_table, meter_table, chassis,
>     addr_sets, flow_table,
>                            active_tunnels, local_lport_ids);
>          add_neighbor_flows(ctx, flow_table);
>      }
>     diff --git a/ovn/controller/lflow.h b/ovn/controller/lflow.h
>     index bfb7415e2..ad8d0a4c7 100644
>     --- a/ovn/controller/lflow.h
>     +++ b/ovn/controller/lflow.h
>     @@ -38,6 +38,7 @@
>      struct chassis_index;
>      struct controller_ctx;
>      struct group_table;
>     +struct meter_table;
>      struct hmap;
>      struct sbrec_chassis;
>      struct simap;
>     @@ -67,6 +68,7 @@ void lflow_run(struct controller_ctx *,
>                     const struct chassis_index *,
>                     const struct hmap *local_datapaths,
>                     struct group_table *group_table,
>     +               struct meter_table *meter_table,
>                     const struct shash *addr_sets,
>                     struct hmap *flow_table,
>                     struct sset *active_tunnels,
>     diff --git a/ovn/controller/ofctrl.c b/ovn/controller/ofctrl.c
>     index fc88a410b..d92258558 100644
>     --- a/ovn/controller/ofctrl.c
>     +++ b/ovn/controller/ofctrl.c
>     @@ -133,6 +133,9 @@ static struct hmap installed_flows;
>      /* A reference to the group_table. */
>      static struct group_table *groups;
>
>     +/* A reference to the meter_table. */
>     +static struct meter_table *meters;
>     +
>      /* MFF_* field ID for our Geneve option.  In
>     S_TLV_TABLE_MOD_SENT, this is
>       * the option we requested (we don't know whether we obtained it
>     yet).  In
>       * S_CLEAR_FLOWS or S_UPDATE_FLOWS, this is really the option we
>     have. */
>     @@ -144,13 +147,15 @@ static struct ofpbuf *encode_flow_mod(struct
>     ofputil_flow_mod *);
>
>      static struct ofpbuf *encode_group_mod(const struct
>     ofputil_group_mod *);
>
>     +static struct ofpbuf *encode_meter_mod(const struct
>     ofputil_meter_mod *);
>     +
>      static void ovn_flow_table_clear(struct hmap *flow_table);
>      static void ovn_flow_table_destroy(struct hmap *flow_table);
>
>      static void ofctrl_recv(const struct ofp_header *, enum ofptype);
>
>      void
>     -ofctrl_init(struct group_table *group_table)
>     +ofctrl_init(struct group_table *group_table, struct meter_table
>     *meter_table)
>      {
>          swconn = rconn_create(5, 0, DSCP_DEFAULT, 1 << OFP13_VERSION);
>          tx_counter = rconn_packet_counter_create();
>     @@ -158,6 +163,7 @@ ofctrl_init(struct group_table *group_table)
>          ovs_list_init(&flow_updates);
>          ovn_init_symtab(&symtab);
>          groups = group_table;
>     +    meters = meter_table;
>      }
>
>      /* S_NEW, for a new connection.
>     @@ -388,6 +394,18 @@ run_S_CLEAR_FLOWS(void)
>              ovn_group_table_clear(groups, true);
>          }
>
>     +    /* Send a meter_mod to delete all meters. */
>     +    struct ofputil_meter_mod mm;
>     +    memset(&mm, 0, sizeof mm);
>     +    mm.command = OFPMC13_DELETE;
>     +    mm.meter.meter_id = OFPM13_ALL;
>     +    queue_msg(encode_meter_mod(&mm));
>     +
>     +    /* Clear existing meters, to match the state of the switch. */
>     +    if (meters) {
>     +        ovn_meter_table_clear(meters, true);
>     +    }
>     +
>          /* All flow updates are irrelevant now. */
>          struct ofctrl_flow_update *fup, *next;
>          LIST_FOR_EACH_SAFE (fup, next, list_node, &flow_updates) {
>     @@ -797,7 +815,60 @@ add_group_mod(const struct ofputil_group_mod
>     *gm, struct ovs_list *msgs)
>          struct ofpbuf *msg = encode_group_mod(gm);
>          ovs_list_push_back(msgs, &msg->list_node);
>      }
>     -
>     +
>     +/* meter_table. */
>     +
>     +/* Finds and returns a meter_info in 'existing_meters' whose key
>     is identical
>     + * to 'target''s key, or NULL if there is none. */
>     +static struct meter_info *
>     +ovn_meter_lookup(struct hmap *exisiting_meters,
>     +                 const struct meter_info *target)
>     +{
>     +    struct meter_info *e;
>     +
>     +    HMAP_FOR_EACH_WITH_HASH(e, hmap_node, target->hmap_node.hash,
>     +                            exisiting_meters) {
>     +        if (e->meter_id == target->meter_id) {
>     +            return e;
>     +        }
>     +   }
>     +    return NULL;
>     +}
>     +
>     +/* Clear either desired_meters or existing_meters in meter_table. */
>     +void
>     +ovn_meter_table_clear(struct meter_table *meter_table, bool existing)
>     +{
>     +    struct meter_info *m, *next;
>     +    struct hmap *target_meter = existing
>     +                                ? &meter_table->existing_meters
>     +                                : &meter_table->desired_meters;
>     +
>     +    HMAP_FOR_EACH_SAFE (m, next, hmap_node, target_meter) {
>     +        hmap_remove(target_meter, &m->hmap_node);
>     +        /* Don't unset bitmap for desired meter_info if the meter_id
>     +         * was not freshly reserved. */
>     +        if (existing || m->new_meter_id) {
>     +            bitmap_set0(meter_table->meter_ids, m->meter_id);
>     +        }
>     +        ds_destroy(&m->meter);
>     +        free(m);
>     +    }
>     +}
>     +
>     +static struct ofpbuf *
>     +encode_meter_mod(const struct ofputil_meter_mod *mm)
>     +{
>     +    return ofputil_encode_meter_mod(OFP13_VERSION, mm);
>     +}
>     +
>     +static void
>     +add_meter_mod(const struct ofputil_meter_mod *mm, struct ovs_list
>     *msgs)
>     +{
>     +    struct ofpbuf *msg = encode_meter_mod(mm);
>     +    ovs_list_push_back(msgs, &msg->list_node);
>     +}
>     +
>      static void
>      add_ct_flush_zone(uint16_t zone_id, struct ovs_list *msgs)
>      {
>     @@ -833,6 +904,12 @@ ofctrl_can_put(void)
>       * 'groups->desired_groups' and frees them. (The hmap itself isn't
>       * destroyed.)
>       *
>     + * Replaces the meter table on the switch, if possible, by the
>     contents of
>     + * 'meters->desired_meters'.  Regardless of whether the meter table
>     + * is updated, this deletes all the meters from the
>     + * 'meters->desired_meters' and frees them. (The hmap itself isn't
>     + * destroyed.)
>     + *
>       * Sends conntrack flush messages to each zone in
>     'pending_ct_zones' that
>       * is in the CT_ZONE_OF_QUEUED state and then moves the zone into the
>       * CT_ZONE_OF_SENT state.
>     @@ -891,6 +968,35 @@ ofctrl_put(struct hmap *flow_table, struct
>     shash *pending_ct_zones,
>              }
>          }
>
>     +    /* Iterate through all the desired meters. If there are new ones,
>     +     * add them to the switch. */
>     +    struct meter_info *desired_meter;
>     +    HMAP_FOR_EACH(desired_meter, hmap_node,
>     &meters->desired_meters) {
>     +        if (!ovn_meter_lookup(&meters->existing_meters,
>     desired_meter)
>     +            && desired_meter->meter_id) {
>     +            /* Create and install new meter. */
>     +            struct ofputil_meter_mod mm;
>     +            enum ofputil_protocol usable_protocols;
>     +            char *error;
>     +            struct ds meter_string = DS_EMPTY_INITIALIZER;
>     +            ds_put_format(&meter_string, "meter=%u,%s",
>     +                          desired_meter->meter_id,
>     + ds_cstr(&desired_meter->meter));
>     +
>     +            error = parse_ofp_meter_mod_str(&mm,
>     ds_cstr(&meter_string),
>     +                                            OFPMC13_ADD,
>     &usable_protocols);
>     +            if (!error) {
>     +                add_meter_mod(&mm, &msgs);
>     +            } else {
>     +                static struct vlog_rate_limit rl =
>     VLOG_RATE_LIMIT_INIT(5, 1);
>     +                VLOG_ERR_RL(&rl, "new meter %s %s", error,
>     +                         ds_cstr(&meter_string));
>     +                free(error);
>     +            }
>     +            ds_destroy(&meter_string);
>     +        }
>     +    }
>     +
>          /* Iterate through all of the installed flows.  If any of
>     them are no
>           * longer desired, delete them; if any of them should have
>     different
>           * actions, update them. */
>     @@ -1012,6 +1118,54 @@ ofctrl_put(struct hmap *flow_table, struct
>     shash *pending_ct_zones,
>              }
>          }
>
>     +    /* Iterate through the installed meters from previous runs.
>     If they
>     +     * are not needed delete them. */
>     +    struct meter_info *installed_meter, *next_meter;
>     +    HMAP_FOR_EACH_SAFE(installed_meter, next_meter, hmap_node,
>     +                       &meters->existing_meters) {
>     +        if (!ovn_meter_lookup(&meters->desired_meters,
>     installed_meter)) {
>     +            /* Delete the meter. */
>     +            struct ofputil_meter_mod mm;
>     +            enum ofputil_protocol usable_protocols;
>     +            char *error;
>     +            struct ds meter_string = DS_EMPTY_INITIALIZER;
>     +            ds_put_format(&meter_string, "meter=%u",
>     installed_meter->meter_id);
>     +
>     +            error = parse_ofp_meter_mod_str(&mm,
>     ds_cstr(&meter_string),
>     +                                            OFPMC13_DELETE,
>     &usable_protocols);
>     +            if (!error) {
>     +                add_meter_mod(&mm, &msgs);
>     +            } else {
>     +                static struct vlog_rate_limit rl =
>     VLOG_RATE_LIMIT_INIT(5, 1);
>     +                VLOG_ERR_RL(&rl, "Error deleting meter %d: %s",
>     +                         installed_meter->meter_id, error);
>     +                free(error);
>     +            }
>     +            ds_destroy(&meter_string);
>     +
>     +            /* Remove 'installed_meter' from
>     'meters->existing_meters' */
>     +            hmap_remove(&meters->existing_meters,
>     &installed_meter->hmap_node);
>     +            ds_destroy(&installed_meter->meter);
>     +
>     +            /* Dealloc meter_id. */
>     +            bitmap_set0(meters->meter_ids,
>     installed_meter->meter_id);
>     +            free(installed_meter);
>     +        }
>     +    }
>     +
>     +    /* Move the contents of desired_meters to existing_meters. */
>     +    HMAP_FOR_EACH_SAFE(desired_meter, next_meter, hmap_node,
>     +                       &meters->desired_meters) {
>     +        hmap_remove(&meters->desired_meters,
>     &desired_meter->hmap_node);
>     +        if (!ovn_meter_lookup(&meters->existing_meters,
>     desired_meter)) {
>     +            hmap_insert(&meters->existing_meters,
>     &desired_meter->hmap_node,
>     +                        desired_meter->hmap_node.hash);
>     +        } else {
>     +           ds_destroy(&desired_meter->meter);
>     +           free(desired_meter);
>     +        }
>     +    }
>     +
>          if (!ovs_list_is_empty(&msgs)) {
>              /* Add a barrier to the list of messages. */
>              struct ofpbuf *barrier =
>     ofputil_encode_barrier_request(OFP13_VERSION);
>     diff --git a/ovn/controller/ofctrl.h b/ovn/controller/ofctrl.h
>     index d83f6aec4..e680e2d61 100644
>     --- a/ovn/controller/ofctrl.h
>     +++ b/ovn/controller/ofctrl.h
>     @@ -24,6 +24,7 @@
>
>      struct controller_ctx;
>      struct group_table;
>     +struct meter_table;
>      struct hmap;
>      struct match;
>      struct ofpbuf;
>     @@ -31,7 +32,7 @@ struct ovsrec_bridge;
>      struct shash;
>
>      /* Interface for OVN main loop. */
>     -void ofctrl_init(struct group_table *group_table);
>     +void ofctrl_init(struct group_table *group_table, struct
>     meter_table *meter_table);
>      enum mf_field_id ofctrl_run(const struct ovsrec_bridge *br_int,
>                                  struct shash *pending_ct_zones);
>      bool ofctrl_can_put(void);
>     @@ -58,4 +59,7 @@ void ofctrl_flow_table_clear(void);
>      void ovn_group_table_clear(struct group_table *group_table,
>                                 bool existing);
>
>     +void ovn_meter_table_clear(struct meter_table *meter_table,
>     +                           bool existing);
>     +
>      #endif /* ovn/ofctrl.h */
>     diff --git a/ovn/controller/ovn-controller.c
>     b/ovn/controller/ovn-controller.c
>     index a935a791c..c5926bc83 100644
>     --- a/ovn/controller/ovn-controller.c
>     +++ b/ovn/controller/ovn-controller.c
>     @@ -599,9 +599,16 @@ main(int argc, char *argv[])
>          hmap_init(&group_table.desired_groups);
>          hmap_init(&group_table.existing_groups);
>
>     +    /* Initialize meter ids for QoS. */
>     +    struct meter_table meter_table;
>     +    meter_table.meter_ids = bitmap_allocate(MAX_OVN_METERS);
>     +    bitmap_set1(meter_table.meter_ids, 0); /* Meter id 0 is
>     invalid. */
>     +    hmap_init(&meter_table.desired_meters);
>     +    hmap_init(&meter_table.existing_meters);
>     +
>          daemonize_complete();
>
>     -    ofctrl_init(&group_table);
>     +    ofctrl_init(&group_table, &meter_table);
>          pinctrl_init();
>          lflow_init();
>
>     @@ -711,8 +718,8 @@ main(int argc, char *argv[])
>                          struct hmap flow_table =
>     HMAP_INITIALIZER(&flow_table);
>                          lflow_run(&ctx, chassis,
>                                    &chassis_index, &local_datapaths,
>     &group_table,
>     -                              &addr_sets, &flow_table,
>     &active_tunnels,
>     -                              &local_lport_ids);
>     +                              &meter_table, &addr_sets, &flow_table,
>     +                              &active_tunnels, &local_lport_ids);
>
>                          if (chassis_id) {
>                              bfd_run(&ctx, br_int, chassis,
>     &local_datapaths,
>     @@ -856,6 +863,18 @@ main(int argc, char *argv[])
>          }
>          hmap_destroy(&group_table.existing_groups);
>
>     +    bitmap_free(meter_table.meter_ids);
>     +    hmap_destroy(&meter_table.desired_meters);
>     +
>     +    struct meter_info *installed_meter, *next_meter;
>     +    HMAP_FOR_EACH_SAFE(installed_meter, next_meter, hmap_node,
>     +                       &meter_table.existing_meters) {
>     +        hmap_remove(&meter_table.existing_meters,
>     &installed_meter->hmap_node);
>     +        ds_destroy(&installed_meter->meter);
>     +        free(installed_meter);
>     +    }
>     +    hmap_destroy(&meter_table.existing_meters);
>     +
>          ovsdb_idl_loop_destroy(&ovs_idl_loop);
>          ovsdb_idl_loop_destroy(&ovnsb_idl_loop);
>
>     diff --git a/ovn/lib/actions.c b/ovn/lib/actions.c
>     index d0d73b69c..9c1f5f963 100644
>     --- a/ovn/lib/actions.c
>     +++ b/ovn/lib/actions.c
>     @@ -1873,6 +1873,118 @@ ovnact_log_free(struct ovnact_log *log)
>          free(log->name);
>      }
>
>     +static void
>     +parse_set_meter_action(struct action_context *ctx)
>     +{
>     +    int rate;
>     +    int burst = 0;
>     +
>     +    if (ctx->pp->cur_ltable >= ctx->pp->n_tables) {
>     +        lexer_error(ctx->lexer,
>     +                    "\"set_meter\" action not allowed in last
>     table.");
>     +        return;
>     +    }
>     +
>     +    lexer_force_match(ctx->lexer, LEX_T_LPAREN);
>     +    lexer_force_int(ctx->lexer, &rate);
>     +    if (lexer_match(ctx->lexer, LEX_T_COMMA)) {
>     +        lexer_force_int(ctx->lexer, &burst);
>     +    }
>     +    lexer_force_match(ctx->lexer, LEX_T_RPAREN);
>     +
>     +    struct ovnact_set_meter *cl = ovnact_put_SET_METER(ctx->ovnacts);
>     +    cl->rate = (uint32_t)rate;
>     +    cl->burst = (uint32_t)burst;
>     +}
>     +
>     +static void
>     +format_SET_METER(const struct ovnact_set_meter *cl, struct ds *s)
>     +{
>     +    if (cl->burst) {
>     +        ds_put_format(s, "set_meter(%d ,%d);", cl->rate, cl->burst);
>     +    } else {
>     +        ds_put_format(s, "set_meter(%d);", cl->rate);
>     +    }
>     +}
>     +
>     +static void
>     +encode_SET_METER(const struct ovnact_set_meter *cl,
>     +                 const struct ovnact_encode_params *ep,
>     +                 struct ofpbuf *ofpacts)
>     +{
>     +    uint32_t meter_id = 0, hash;
>     +    struct meter_info *meter_info;
>     +    struct ofpact_meter *om;
>     +
>     +    struct ds ds = DS_EMPTY_INITIALIZER;
>     +    if (cl->burst) {
>     +        ds_put_format(&ds,
>     +                      "kbps burst stats bands=type=drop rate=%d
>     burst_size=%d",
>     +                      cl->rate, cl->burst);
>     +    } else {
>     +        ds_put_format(&ds, "kbps stats bands=type=drop rate=%d",
>     cl->rate);
>     +    }
>     +
>     +    hash = hash_string(ds_cstr(&ds), 0);
>     +
>     +    /* Check whether we have non installed but allocated meter_id. */
>     +    HMAP_FOR_EACH_WITH_HASH (meter_info, hmap_node, hash,
>     +  &ep->meter_table->desired_meters) {
>     +        if (!strcmp(ds_cstr(&meter_info->meter), ds_cstr(&ds))) {
>     +            meter_id = meter_info->meter_id;
>     +            break;
>     +        }
>     +    }
>     +
>     +    if (!meter_id) {
>     +        /* Check whether we already have an installed entry for this
>     +         * combination. */
>     +        HMAP_FOR_EACH_WITH_HASH (meter_info, hmap_node, hash,
>     +  &ep->meter_table->existing_meters) {
>     +            if (!strcmp(ds_cstr(&meter_info->meter), ds_cstr(&ds))) {
>     +                meter_id = meter_info->meter_id;
>     +            }
>     +        }
>     +
>     +        bool new_meter_id = false;
>     +        if (!meter_id) {
>     +            /* Reserve a new meter_id. */
>     +            meter_id = bitmap_scan(ep->meter_table->meter_ids, 0, 1,
>     +                                   MAX_OVN_METERS + 1);
>     +            new_meter_id = true;
>     +        }
>     +
>     +        if (meter_id == MAX_OVN_METERS + 1) {
>     +            static struct vlog_rate_limit rl =
>     VLOG_RATE_LIMIT_INIT(1, 1);
>     +            VLOG_ERR_RL(&rl, "out of meter ids");
>     +
>     +            ds_destroy(&ds);
>     +            return;
>     +        }
>     +        bitmap_set1(ep->meter_table->meter_ids, meter_id);
>     +
>     +        meter_info = xmalloc(sizeof *meter_info);
>     +        meter_info->meter = ds;
>     +        meter_info->meter_id = meter_id;
>     +        meter_info->hmap_node.hash = hash;
>     +        meter_info->new_meter_id = new_meter_id;
>     +
>     +        hmap_insert(&ep->meter_table->desired_meters,
>     +                    &meter_info->hmap_node,
>     meter_info->hmap_node.hash);
>     +    } else {
>     +        ds_destroy(&ds);
>     +    }
>     +
>     +    /* Create an action to set the meter. */
>     +    om = ofpact_put_METER(ofpacts);
>     +    om->meter_id = meter_id;
>     +}
>     +
>     +static void
>     +ovnact_set_meter_free(struct ovnact_set_meter *ct OVS_UNUSED)
>     +{
>     +}
>     +
>      /* Parses an assignment or exchange or put_dhcp_opts action. */
>      static void
>      parse_set_action(struct action_context *ctx)
>     @@ -1954,6 +2066,8 @@ parse_action(struct action_context *ctx)
>              parse_SET_QUEUE(ctx);
>          } else if (lexer_match_id(ctx->lexer, "log")) {
>              parse_LOG(ctx);
>     +    } else if (lexer_match_id(ctx->lexer, "set_meter")) {
>     +        parse_set_meter_action(ctx);
>          } else {
>              lexer_syntax_error(ctx->lexer, "expecting action");
>          }
>     diff --git a/ovn/northd/ovn-northd.8.xml b/ovn/northd/ovn-northd.8.xml
>     index 0d85ec0d2..5a2febb27 100644
>     --- a/ovn/northd/ovn-northd.8.xml
>     +++ b/ovn/northd/ovn-northd.8.xml
>     @@ -364,7 +364,28 @@
>            </li>
>          </ul>
>
>     -    <h3>Ingress Table 8: LB</h3>
>     +    <h3>Ingress Table 8: <code>from-lport</code> QoS meter</h3>
>     +
>     +    <p>
>     +      Logical flows in this table closely reproduce those in the
>     +      <code>QoS</code> table <code>bandwidth</code> column in the
>     +      <code>OVN_Northbound</code> database for the
>     <code>from-lport</code>
>     +      direction.
>     +    </p>
>     +
>     +    <ul>
>     +      <li>
>     +        For each QoS rule on each logical switch, a flow is added
>     +        at the priority specified in the QoS table.
>     +      </li>
>     +
>     +      <li>
>     +        One priority-0 fallback flow that matches all packets and
>     advances to
>     +        the next table.
>     +      </li>
>     +    </ul>
>     +
>     +    <h3>Ingress Table 9: LB</h3>
>
>          <p>
>            It contains a priority-0 flow that simply moves traffic to
>     the next
>     @@ -377,7 +398,7 @@
>            connection.)
>          </p>
>
>     -    <h3>Ingress Table 9: Stateful</h3>
>     +    <h3>Ingress Table 10: Stateful</h3>
>
>          <ul>
>            <li>
>     @@ -414,7 +435,7 @@
>            </li>
>          </ul>
>
>     -    <h3>Ingress Table 10: ARP/ND responder</h3>
>     +    <h3>Ingress Table 11: ARP/ND responder</h3>
>
>          <p>
>            This table implements ARP/ND responder in a logical switch
>     for known
>     @@ -564,7 +585,7 @@ nd_na {
>            </li>
>          </ul>
>
>     -    <h3>Ingress Table 11: DHCP option processing</h3>
>     +    <h3>Ingress Table 12: DHCP option processing</h3>
>
>          <p>
>            This table adds the DHCPv4 options to a DHCPv4 packet from the
>     @@ -624,7 +645,7 @@ next;
>            </li>
>          </ul>
>
>     -    <h3>Ingress Table 12: DHCP responses</h3>
>     +    <h3>Ingress Table 13: DHCP responses</h3>
>
>          <p>
>            This table implements DHCP responder for the DHCP replies
>     generated by
>     @@ -706,7 +727,7 @@ output;
>            </li>
>          </ul>
>
>     -    <h3>Ingress Table 13 DNS Lookup</h3>
>     +    <h3>Ingress Table 14 DNS Lookup</h3>
>
>          <p>
>            This table looks up and resolves the DNS names to the
>     corresponding
>     @@ -735,7 +756,7 @@ reg0[4] = dns_lookup(); next;
>            </li>
>          </ul>
>
>     -    <h3>Ingress Table 14 DNS Responses</h3>
>     +    <h3>Ingress Table 15 DNS Responses</h3>
>
>          <p>
>            This table implements DNS responder for the DNS replies
>     generated by
>     @@ -770,7 +791,7 @@ output;
>            </li>
>          </ul>
>
>     -    <h3>Ingress Table 15 Destination Lookup</h3>
>     +    <h3>Ingress Table 16 Destination Lookup</h3>
>
>          <p>
>            This table implements switching behavior.  It contains
>     these logical
>     @@ -872,7 +893,14 @@ output;
>            <code>to-lport</code> qos rules.
>          </p>
>
>     -    <h3>Egress Table 6: Stateful</h3>
>     +    <h3>Egress Table 6: <code>to-lport</code> QoS meter</h3>
>     +
>     +    <p>
>     +      This is similar to ingress table <code>QoS meter</code>
>     except for
>     +      <code>to-lport</code> qos rules.
>     +    </p>
>     +
>     +    <h3>Egress Table 7: Stateful</h3>
>
>          <p>
>            This is similar to ingress table <code>Stateful</code>
>     except that
>     @@ -887,18 +915,18 @@ output;
>              A priority 34000 logical flow is added for each logical
>     port which
>              has DHCPv4 options defined to allow the DHCPv4 reply
>     packet and which has
>              DHCPv6 options defined to allow the DHCPv6 reply packet
>     from the
>     -        <code>Ingress Table 12: DHCP responses</code>.
>     +        <code>Ingress Table 13: DHCP responses</code>.
>            </li>
>
>            <li>
>              A priority 34000 logical flow is added for each logical
>     switch datapath
>              configured with DNS records with the match <code>udp.dst
>     = 53</code>
>              to allow the DNS reply packet from the
>     -        <code>Ingress Table 14:DNS responses</code>.
>     +        <code>Ingress Table 15:DNS responses</code>.
>            </li>
>          </ul>
>
>     -    <h3>Egress Table 7: Egress Port Security - IP</h3>
>     +    <h3>Egress Table 8: Egress Port Security - IP</h3>
>
>          <p>
>            This is similar to the port security logic in table
>     @@ -908,7 +936,7 @@ output;
>            <code>ip4.src</code> and <code>ip6.src</code>
>          </p>
>
>     -    <h3>Egress Table 8: Egress Port Security - L2</h3>
>     +    <h3>Egress Table 9: Egress Port Security - L2</h3>
>
>          <p>
>            This is similar to the ingress port security logic in
>     ingress table
>     diff --git a/ovn/northd/ovn-northd.c b/ovn/northd/ovn-northd.c
>     index 2db238073..4c0d6fcd0 100644
>     --- a/ovn/northd/ovn-northd.c
>     +++ b/ovn/northd/ovn-northd.c
>     @@ -108,25 +108,27 @@ enum ovn_stage {
>          PIPELINE_STAGE(SWITCH, IN,  PRE_STATEFUL,   5,
>     "ls_in_pre_stateful")  \
>          PIPELINE_STAGE(SWITCH, IN,  ACL,            6, "ls_in_acl") 
>              \
>          PIPELINE_STAGE(SWITCH, IN,  QOS_MARK,       7,
>     "ls_in_qos_mark")      \
>     -    PIPELINE_STAGE(SWITCH, IN,  LB,             8, "ls_in_lb")   
>             \
>     -    PIPELINE_STAGE(SWITCH, IN,  STATEFUL,       9,
>     "ls_in_stateful")      \
>     -    PIPELINE_STAGE(SWITCH, IN,  ARP_ND_RSP,    10,
>     "ls_in_arp_rsp")       \
>     -    PIPELINE_STAGE(SWITCH, IN,  DHCP_OPTIONS,  11,
>     "ls_in_dhcp_options")  \
>     -    PIPELINE_STAGE(SWITCH, IN,  DHCP_RESPONSE, 12,
>     "ls_in_dhcp_response") \
>     -    PIPELINE_STAGE(SWITCH, IN,  DNS_LOOKUP,      13,
>     "ls_in_dns_lookup") \
>     -    PIPELINE_STAGE(SWITCH, IN,  DNS_RESPONSE,  14,
>     "ls_in_dns_response") \
>     -    PIPELINE_STAGE(SWITCH, IN,  L2_LKUP,       15,
>     "ls_in_l2_lkup")       \
>     -           \
>     -    /* Logical switch egress stages. */            \
>     -    PIPELINE_STAGE(SWITCH, OUT, PRE_LB,       0,
>     "ls_out_pre_lb")     \
>     -    PIPELINE_STAGE(SWITCH, OUT, PRE_ACL,      1,
>     "ls_out_pre_acl")     \
>     -    PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2,
>     "ls_out_pre_stateful")  \
>     -    PIPELINE_STAGE(SWITCH, OUT, LB,           3, "ls_out_lb")   
>             \
>     +    PIPELINE_STAGE(SWITCH, IN,  QOS_METER,      8,
>     "ls_in_qos_meter")     \
>     +    PIPELINE_STAGE(SWITCH, IN,  LB,             9, "ls_in_lb")   
>             \
>     +    PIPELINE_STAGE(SWITCH, IN,  STATEFUL,      10,
>     "ls_in_stateful")      \
>     +    PIPELINE_STAGE(SWITCH, IN,  ARP_ND_RSP,    11,
>     "ls_in_arp_rsp")       \
>     +    PIPELINE_STAGE(SWITCH, IN,  DHCP_OPTIONS,  12,
>     "ls_in_dhcp_options")  \
>     +    PIPELINE_STAGE(SWITCH, IN,  DHCP_RESPONSE, 13,
>     "ls_in_dhcp_response") \
>     +    PIPELINE_STAGE(SWITCH, IN,  DNS_LOOKUP,    14,
>     "ls_in_dns_lookup")    \
>     +    PIPELINE_STAGE(SWITCH, IN,  DNS_RESPONSE,  15,
>     "ls_in_dns_response")  \
>     +    PIPELINE_STAGE(SWITCH, IN,  L2_LKUP,       16,
>     "ls_in_l2_lkup")       \
>     +               \
>     +    /* Logical switch egress stages. */                \
>     +    PIPELINE_STAGE(SWITCH, OUT, PRE_LB,       0,
>     "ls_out_pre_lb")         \
>     +    PIPELINE_STAGE(SWITCH, OUT, PRE_ACL,      1,
>     "ls_out_pre_acl")        \
>     +    PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2,
>     "ls_out_pre_stateful")   \
>     +    PIPELINE_STAGE(SWITCH, OUT, LB,           3, "ls_out_lb")   
>              \
>          PIPELINE_STAGE(SWITCH, OUT, ACL,          4, "ls_out_acl")   
>             \
>          PIPELINE_STAGE(SWITCH, OUT, QOS_MARK,     5,
>     "ls_out_qos_mark")       \
>     -    PIPELINE_STAGE(SWITCH, OUT, STATEFUL,     6,
>     "ls_out_stateful")       \
>     -    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP,  7,
>     "ls_out_port_sec_ip")    \
>     -    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2,  8,
>     "ls_out_port_sec_l2")    \
>     +    PIPELINE_STAGE(SWITCH, OUT, QOS_METER,    6,
>     "ls_out_qos_meter")      \
>     +    PIPELINE_STAGE(SWITCH, OUT, STATEFUL,     7,
>     "ls_out_stateful")       \
>     +    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP,  8,
>     "ls_out_port_sec_ip")    \
>     +    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2,  9,
>     "ls_out_port_sec_l2")    \
>                \
>          /* Logical router ingress stages. */           \
>          PIPELINE_STAGE(ROUTER, IN,  ADMISSION,   0,
>     "lr_in_admission")    \
>     @@ -3365,21 +3367,49 @@ static void
>      build_qos(struct ovn_datapath *od, struct hmap *lflows) {
>          ovn_lflow_add(lflows, od, S_SWITCH_IN_QOS_MARK, 0, "1", "next;");
>          ovn_lflow_add(lflows, od, S_SWITCH_OUT_QOS_MARK, 0, "1",
>     "next;");
>     +    ovn_lflow_add(lflows, od, S_SWITCH_IN_QOS_METER, 0, "1",
>     "next;");
>     +    ovn_lflow_add(lflows, od, S_SWITCH_OUT_QOS_METER, 0, "1",
>     "next;");
>
>          for (size_t i = 0; i < od->nbs->n_qos_rules; i++) {
>              struct nbrec_qos *qos = od->nbs->qos_rules[i];
>              bool ingress = !strcmp(qos->direction, "from-lport") ?
>     true :false;
>              enum ovn_stage stage = ingress ? S_SWITCH_IN_QOS_MARK :
>     S_SWITCH_OUT_QOS_MARK;
>     +        uint32_t rate = 0;
>     +        uint32_t burst = 0;
>     +
>     +        for (size_t j = 0; j < qos->n_action; j++) {
>     +            if (!strcmp(qos->key_action[j], "dscp")) {
>     +                struct ds dscp_action = DS_EMPTY_INITIALIZER;
>     +
>     +                ds_put_format(&dscp_action, "ip.dscp = %d; next;",
>     + (uint8_t)qos->value_action[j]);
>     +                ovn_lflow_add(lflows, od, stage,
>     +                              qos->priority,
>     +                              qos->match, ds_cstr(&dscp_action));
>     +                ds_destroy(&dscp_action);
>     +            }
>     +        }
>
>     -        if (!strcmp(qos->key_action, "dscp")) {
>     -            struct ds dscp_action = DS_EMPTY_INITIALIZER;
>     -
>     -            ds_put_format(&dscp_action, "ip.dscp = %d; next;",
>     -                          (uint8_t)qos->value_action);
>     +        for (size_t n = 0; n < qos->n_bandwidth; n++) {
>     +            if (!strcmp(qos->key_bandwidth[n], "rate")) {
>     +                rate = (uint32_t)qos->value_bandwidth[n];
>     +            } else if (!strcmp(qos->key_bandwidth[n], "burst")) {
>     +                burst = (uint32_t)qos->value_bandwidth[n];
>     +            }
>     +        }
>     +        if (rate) {
>     +            struct ds meter_action = DS_EMPTY_INITIALIZER;
>     +            stage = ingress ? S_SWITCH_IN_QOS_METER :
>     S_SWITCH_OUT_QOS_METER;
>     +            if (burst) {
>     +                ds_put_format(&meter_action, "set_meter(%d, %d);
>     next;",
>     +                              rate, burst);
>     +            } else {
>     +                ds_put_format(&meter_action, "set_meter(%d);
>     next;", rate);
>     +            }
>                  ovn_lflow_add(lflows, od, stage,
>                                qos->priority,
>     -                          qos->match, ds_cstr(&dscp_action));
>     -            ds_destroy(&dscp_action);
>     +                          qos->match, ds_cstr(&meter_action));
>     +            ds_destroy(&meter_action);
>              }
>          }
>      }
>     @@ -3489,7 +3519,7 @@ build_lswitch_flows(struct hmap *datapaths,
>     struct hmap *ports,
>          struct ds actions = DS_EMPTY_INITIALIZER;
>
>          /* Build pre-ACL and ACL tables for both ingress and egress.
>     -     * Ingress tables 3 through 9.  Egress tables 0 through 6. */
>     +     * Ingress tables 3 through 10.  Egress tables 0 through 7. */
>          struct ovn_datapath *od;
>          HMAP_FOR_EACH (od, key_node, datapaths) {
>              if (!od->nbs) {
>     @@ -3572,7 +3602,7 @@ build_lswitch_flows(struct hmap *datapaths,
>     struct hmap *ports,
>              ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_IP, 0,
>     "1", "next;");
>          }
>
>     -    /* Ingress table 10: ARP/ND responder, skip requests coming
>     from localnet
>     +    /* Ingress table 11: ARP/ND responder, skip requests coming
>     from localnet
>           * and vtep ports. (priority 100); see ovn-northd.8.xml for the
>           * rationale. */
>          HMAP_FOR_EACH (op, key_node, ports) {
>     @@ -3589,7 +3619,7 @@ build_lswitch_flows(struct hmap *datapaths,
>     struct hmap *ports,
>              }
>          }
>
>     -    /* Ingress table 10: ARP/ND responder, reply for known IPs.
>     +    /* Ingress table 11: ARP/ND responder, reply for known IPs.
>           * (priority 50). */
>          HMAP_FOR_EACH (op, key_node, ports) {
>              if (!op->nbsp) {
>     @@ -3684,7 +3714,7 @@ build_lswitch_flows(struct hmap *datapaths,
>     struct hmap *ports,
>              }
>          }
>
>     -    /* Ingress table 10: ARP/ND responder, by default goto next.
>     +    /* Ingress table 11: ARP/ND responder, by default goto next.
>           * (priority 0)*/
>          HMAP_FOR_EACH (od, key_node, datapaths) {
>              if (!od->nbs) {
>     @@ -3694,7 +3724,7 @@ build_lswitch_flows(struct hmap *datapaths,
>     struct hmap *ports,
>              ovn_lflow_add(lflows, od, S_SWITCH_IN_ARP_ND_RSP, 0, "1",
>     "next;");
>          }
>
>     -    /* Logical switch ingress table 11 and 12: DHCP options and
>     response
>     +    /* Logical switch ingress table 12 and 13: DHCP options and
>     response
>               * priority 100 flows. */
>          HMAP_FOR_EACH (op, key_node, ports) {
>              if (!op->nbsp) {
>     @@ -3796,7 +3826,7 @@ build_lswitch_flows(struct hmap *datapaths,
>     struct hmap *ports,
>              }
>          }
>
>     -    /* Logical switch ingress table 13 and 14: DNS lookup and
>     response
>     +    /* Logical switch ingress table 14 and 15: DNS lookup and
>     response
>           * priority 100 flows.
>           */
>          HMAP_FOR_EACH (od, key_node, datapaths) {
>     @@ -3828,9 +3858,9 @@ build_lswitch_flows(struct hmap *datapaths,
>     struct hmap *ports,
>              ds_destroy(&action);
>          }
>
>     -    /* Ingress table 11 and 12: DHCP options and response, by
>     default goto next.
>     +    /* Ingress table 12 and 13: DHCP options and response, by
>     default goto next.
>           * (priority 0).
>     -     * Ingress table 13 and 14: DNS lookup and response, by
>     default goto next.
>     +     * Ingress table 14 and 15: DNS lookup and response, by
>     default goto next.
>           * (priority 0).*/
>
>          HMAP_FOR_EACH (od, key_node, datapaths) {
>     @@ -3844,7 +3874,7 @@ build_lswitch_flows(struct hmap *datapaths,
>     struct hmap *ports,
>              ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 0,
>     "1", "next;");
>          }
>
>     -    /* Ingress table 15: Destination lookup, broadcast and
>     multicast handling
>     +    /* Ingress table 16: Destination lookup, broadcast and
>     multicast handling
>           * (priority 100). */
>          HMAP_FOR_EACH (op, key_node, ports) {
>              if (!op->nbsp) {
>     @@ -3864,7 +3894,7 @@ build_lswitch_flows(struct hmap *datapaths,
>     struct hmap *ports,
>                            "outport = \""MC_FLOOD"\"; output;");
>          }
>
>     -    /* Ingress table 13: Destination lookup, unicast handling
>     (priority 50), */
>     +    /* Ingress table 16: Destination lookup, unicast handling
>     (priority 50), */
>          HMAP_FOR_EACH (op, key_node, ports) {
>              if (!op->nbsp) {
>                  continue;
>     @@ -3964,7 +3994,7 @@ build_lswitch_flows(struct hmap *datapaths,
>     struct hmap *ports,
>              }
>          }
>
>     -    /* Ingress table 13: Destination lookup for unknown MACs
>     (priority 0). */
>     +    /* Ingress table 16: Destination lookup for unknown MACs
>     (priority 0). */
>          HMAP_FOR_EACH (od, key_node, datapaths) {
>              if (!od->nbs) {
>                  continue;
>     @@ -3976,8 +4006,8 @@ build_lswitch_flows(struct hmap *datapaths,
>     struct hmap *ports,
>              }
>          }
>
>     -    /* Egress tables 6: Egress port security - IP (priority 0)
>     -     * Egress table 7: Egress port security L2 - multicast/broadcast
>     +    /* Egress tables 8: Egress port security - IP (priority 0)
>     +     * Egress table 9: Egress port security L2 - multicast/broadcast
>           *                 (priority 100). */
>          HMAP_FOR_EACH (od, key_node, datapaths) {
>              if (!od->nbs) {
>     @@ -3989,10 +4019,10 @@ build_lswitch_flows(struct hmap
>     *datapaths, struct hmap *ports,
>                            "output;");
>          }
>
>     -    /* Egress table 6: Egress port security - IP (priorities 90
>     and 80)
>     +    /* Egress table 8: Egress port security - IP (priorities 90
>     and 80)
>           * if port security enabled.
>           *
>     -     * Egress table 7: Egress port security - L2 (priorities 50
>     and 150).
>     +     * Egress table 9: Egress port security - L2 (priorities 50
>     and 150).
>           *
>           * Priority 50 rules implement port security for enabled
>     logical port.
>           *
>     diff --git a/ovn/ovn-nb.ovsschema b/ovn/ovn-nb.ovsschema
>     index a077bfb81..23ab008cd 100644
>     --- a/ovn/ovn-nb.ovsschema
>     +++ b/ovn/ovn-nb.ovsschema
>     @@ -1,7 +1,7 @@
>      {
>          "name": "OVN_Northbound",
>          "version": "5.8.0",
>     -    "cksum": "2812300190 16766",
>     +    "cksum": "3886656843 17257",
>          "tables": {
>              "NB_Global": {
>                  "columns": {
>     @@ -164,7 +164,14 @@
>                                                  "enum": ["set",
>     ["dscp"]]},
>                                          "value": {"type": "integer",
>                                                    "minInteger": 0,
>     -                                              "maxInteger": 63}}},
>     +                                              "maxInteger": 63},
>     +                                    "min": 0, "max": "unlimited"}},
>     +                "bandwidth": {"type": {"key": {"type": "string",
>     +                                               "enum": ["set",
>     ["rate", "burst"]]},
>     +                                       "value": {"type": "integer",
>     +  "minInteger": 1,
>     +  "maxInteger": 65535},
>     +                                       "min": 0, "max":
>     "unlimited"}},
>                      "external_ids": {
>                          "type": {"key": "string", "value": "string",
>                                   "min": 0, "max": "unlimited"}}},
>     diff --git a/ovn/ovn-nb.xml b/ovn/ovn-nb.xml
>     index 9869d7ed7..c666ec2c4 100644
>     --- a/ovn/ovn-nb.xml
>     +++ b/ovn/ovn-nb.xml
>     @@ -1250,6 +1250,22 @@
>            </ul>
>          </column>
>
>     +    <column name="bandwidth">
>     +      <p>
>     +         The bandwidth limit to be performed on the matched packet.
>     +         Currently only supported in the userspace by dpdk.
>     +      </p>
>     +      <ul>
>     +        <li>
>     +          <code>rate</code>: The value of rate limit.
>     +        </li>
>     +        <li>
>     +          <code>burst</code>: The value of burst rate limit. This
>     is optional
>     +          and needs to specify the <code>rate</code> first.
>     +        </li>
>     +      </ul>
>     +    </column>
>     +
>          <column name="external_ids">
>            See <em>External IDs</em> at the beginning of this document.
>          </column>
>     diff --git a/ovn/ovn-sb.xml b/ovn/ovn-sb.xml
>     index 0a894f8cb..ee1db3f6f 100644
>     --- a/ovn/ovn-sb.xml
>     +++ b/ovn/ovn-sb.xml
>     @@ -1516,6 +1516,21 @@
>                  <b>Prerequisite:</b> <code>udp</code>
>                </p>
>              </dd>
>     +
>     +        <dt><code>set_meter(<var>rate</var>);</code></dt>
>     +        <dt><code>set_meter(<var>rate</var>,
>     <var>burst</var>);</code></dt>
>     +        <dd>
>     +          <p>
>     +            <b>Parameters</b>: rate limit int field
>     <var>rate</var>, burst rate limits
>     +            int field <var>burst</var>.
>     +          </p>
>     +
>     +          <p>
>     +            This action sets the rate limit for a flow.
>     +          </p>
>     +
>     +          <p><b>Example:</b> <code>set_meter(100, 1000);</code></p>
>     +        </dd>
>            </dl>
>
>            <dl>
>     diff --git a/ovn/utilities/ovn-trace.c b/ovn/utilities/ovn-trace.c
>     index 59083eebe..bb5593876 100644
>     --- a/ovn/utilities/ovn-trace.c
>     +++ b/ovn/utilities/ovn-trace.c
>     @@ -1833,6 +1833,10 @@ trace_actions(const struct ovnact *ovnacts,
>     size_t ovnacts_len,
>              case OVNACT_LOG:
>                  execute_log(ovnact_get_LOG(a), uflow, super);
>                  break;
>     +
>     +        case OVNACT_SET_METER:
>     +            /* Nothing to do. */
>     +            break;
>              }
>
>          }
>     diff --git a/tests/ovn.at <http://ovn.at> b/tests/ovn.at
>     <http://ovn.at>
>     index 6c38b973f..0e34f7df4 100644
>     --- a/tests/ovn.at <http://ovn.at>
>     +++ b/tests/ovn.at <http://ovn.at>
>     @@ -5885,7 +5885,7 @@ OVN_CLEANUP([hv])
>      AT_CLEANUP
>
>
>     -AT_SETUP([ovn -- DSCP marking check])
>     +AT_SETUP([ovn -- DSCP marking and meter check])
>      AT_KEYWORDS([ovn])
>      ovn_start
>
>     @@ -5952,13 +5952,16 @@ AT_CHECK([get_final_nw_tos], [0], [none
>      check_tos 0
>
>      # Mark DSCP with a valid value
>     -qos_id=$(ovn-nbctl --wait=hv -- --id=@lp1-qos create QoS
>     priority=100 action=dscp=48 match="inport\=\=\"lp1\""
>     direction="from-lport" -- set Logical_Switch lsw0 qos_rules=@lp1-qos)
>     +qos_id=$(ovn-nbctl --wait=hv -- --id=@lp1-qos create QoS
>     priority=100 action=dscp=48 bandwidth=rate=100,burst=1000
>     match="inport\=\=\"lp1\"" direction="from-lport" -- set
>     Logical_Switch lsw0 qos_rules=@lp1-qos)
>      check_tos 48
>
>      # Update the DSCP marking
>      ovn-nbctl --wait=hv set QoS $qos_id action=dscp=63
>      check_tos 63
>
>     +# Update the meter rate
>     +ovn-nbctl --wait=hv set QoS $qos_id bandwidth=rate=65535,burst=65535
>     +
>      ovn-nbctl --wait=hv set QoS $qos_id match="outport\=\=\"lp2\""
>     direction="to-lport"
>      check_tos 63
>
>     diff --git a/tests/test-ovn.c b/tests/test-ovn.c
>     index 4beb2b8d6..e9dcba231 100644
>     --- a/tests/test-ovn.c
>     +++ b/tests/test-ovn.c
>     @@ -1206,6 +1206,13 @@ test_parse_actions(struct ovs_cmdl_context
>     *ctx OVS_UNUSED)
>          hmap_init(&group_table.desired_groups);
>          hmap_init(&group_table.existing_groups);
>
>     +    /* Initialize meter ids for QoS. */
>     +    struct meter_table meter_table;
>     +    meter_table.meter_ids = bitmap_allocate(MAX_OVN_METERS);
>     +    bitmap_set1(meter_table.meter_ids, 0); /* Meter id 0 is
>     invalid. */
>     +    hmap_init(&meter_table.desired_meters);
>     +    hmap_init(&meter_table.existing_meters);
>     +
>          simap_init(&ports);
>          simap_put(&ports, "eth0", 5);
>          simap_put(&ports, "eth1", 6);
>     @@ -1244,6 +1251,7 @@ test_parse_actions(struct ovs_cmdl_context
>     *ctx OVS_UNUSED)
>                      .aux = &ports,
>                      .is_switch = true,
>                      .group_table = &group_table,
>     +                .meter_table = &meter_table,
>
>                      .pipeline = OVNACT_P_INGRESS,
>                      .ingress_ptable = 8,
>     --
>     2.13.2.windows.1
>
>     _______________________________________________
>     dev mailing list
>     dev@openvswitch.org <mailto:dev@openvswitch.org>
>     https://mail.openvswitch.org/mailman/listinfo/ovs-dev
>     <https://mail.openvswitch.org/mailman/listinfo/ovs-dev>
>
>
Ben Pfaff Nov. 3, 2017, 9:29 p.m. UTC | #3
On Wed, Sep 20, 2017 at 07:52:34PM +0800, Guoshuai Li wrote:
> ovn-northd modify:
> add bandwidth column in NB's QOS table.
> add QOS_METER stages in Logical switch ingress/egress.
> add set_meter() action in SB's LFlow table.
> 
> ovn-controller modify:
> add meter_table for meter action process openflow meter table.
> 
> This feature is only supported in DPDK.

My apologies for the delayed review.  Would you mind rebasing and
reposting?
Guoshuai Li Nov. 13, 2017, 12:26 p.m. UTC | #4
> On Wed, Sep 20, 2017 at 07:52:34PM +0800, Guoshuai Li wrote:
>> ovn-northd modify:
>> add bandwidth column in NB's QOS table.
>> add QOS_METER stages in Logical switch ingress/egress.
>> add set_meter() action in SB's LFlow table.
>>
>> ovn-controller modify:
>> add meter_table for meter action process openflow meter table.
>>
>> This feature is only supported in DPDK.
> My apologies for the delayed review.  Would you mind rebasing and
> reposting?
I sent a rebased new version:

https://mail.openvswitch.org/pipermail/ovs-dev/2017-November/340856.html
diff mbox series

Patch

diff --git a/NEWS b/NEWS
index 6a5d2bf98..b97e7bff7 100644
--- a/NEWS
+++ b/NEWS
@@ -59,6 +59,7 @@  v2.8.0 - xx xxx xxxx
        gateway.
      * Add support for ACL logging.
      * ovn-northd now has native support for active-standby high availability.
+     * Add support for QoS bandwidth limit with DPDK.
    - Tracing with ofproto/trace now traces through recirculation.
    - OVSDB:
      * New support for role-based access control (see ovsdb-server(1)).
diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index 0a04af7aa..8dbb895f3 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -72,7 +72,8 @@  struct simap;
     OVNACT(PUT_DHCPV6_OPTS,   ovnact_put_dhcp_opts)   \
     OVNACT(SET_QUEUE,         ovnact_set_queue)       \
     OVNACT(DNS_LOOKUP,        ovnact_dns_lookup)      \
-    OVNACT(LOG,               ovnact_log)
+    OVNACT(LOG,               ovnact_log)             \
+    OVNACT(SET_METER,         ovnact_set_meter)
 
 /* enum ovnact_type, with a member OVNACT_<ENUM> for each action. */
 enum OVS_PACKED_ENUM ovnact_type {
@@ -274,6 +275,13 @@  struct ovnact_log {
     char *name;
 };
 
+/* OVNACT_SET_METER. */
+struct ovnact_set_meter {
+    struct ovnact ovnact;
+    uint32_t rate;              /* 32-bit rate field. */
+    uint32_t burst;             /* 32-bit burst rate field. */
+};
+
 /* Internal use by the helpers below. */
 void ovnact_init(struct ovnact *, enum ovnact_type, size_t len);
 void *ovnact_put(struct ofpbuf *, enum ovnact_type, size_t len);
@@ -350,6 +358,24 @@  struct group_info {
                          * group_table's 'group_ids' bitmap. */
 };
 
+#define MAX_OVN_METERS 65535
+
+struct meter_table {
+    unsigned long *meter_ids;  /* Used as a bitmap with value set
+                                * for allocated meter ids in either
+                                * desired_meters or existing_meters. */
+    struct hmap desired_meters;
+    struct hmap existing_meters;
+};
+
+struct meter_info {
+    struct hmap_node hmap_node;
+    struct ds meter;
+    uint32_t meter_id;
+    bool new_meter_id;  /* 'True' if 'meter_id' was reserved from
+                         * meter_table's 'meter_ids' bitmap. */
+};
+
 enum action_opcode {
     /* "arp { ...actions... }".
      *
@@ -484,6 +510,9 @@  struct ovnact_encode_params {
     /* A struct to figure out the group_id for group actions. */
     struct group_table *group_table;
 
+    /* A struct to figure out the meter_id for meter actions. */
+    struct meter_table *meter_table;
+
     /* OVN maps each logical flow table (ltable), one-to-one, onto a physical
      * OpenFlow flow table (ptable).  A number of parameters describe this
      * mapping and data related to flow tables:
diff --git a/ovn/controller/lflow.c b/ovn/controller/lflow.c
index 20a18c259..bf7a59f32 100644
--- a/ovn/controller/lflow.c
+++ b/ovn/controller/lflow.c
@@ -62,6 +62,7 @@  static void consider_logical_flow(struct controller_ctx *ctx,
                                   const struct sbrec_logical_flow *lflow,
                                   const struct hmap *local_datapaths,
                                   struct group_table *group_table,
+                                  struct meter_table *meter_table,
                                   const struct sbrec_chassis *chassis,
                                   struct hmap *dhcp_opts,
                                   struct hmap *dhcpv6_opts,
@@ -143,6 +144,7 @@  add_logical_flows(struct controller_ctx *ctx,
                   const struct chassis_index *chassis_index,
                   const struct hmap *local_datapaths,
                   struct group_table *group_table,
+                  struct meter_table *meter_table,
                   const struct sbrec_chassis *chassis,
                   const struct shash *addr_sets,
                   struct hmap *flow_table,
@@ -170,7 +172,7 @@  add_logical_flows(struct controller_ctx *ctx,
     SBREC_LOGICAL_FLOW_FOR_EACH (lflow, ctx->ovnsb_idl) {
         consider_logical_flow(ctx, chassis_index,
                               lflow, local_datapaths,
-                              group_table, chassis,
+                              group_table, meter_table, chassis,
                               &dhcp_opts, &dhcpv6_opts, &conj_id_ofs,
                               addr_sets, flow_table, active_tunnels,
                               local_lport_ids);
@@ -186,6 +188,7 @@  consider_logical_flow(struct controller_ctx *ctx,
                       const struct sbrec_logical_flow *lflow,
                       const struct hmap *local_datapaths,
                       struct group_table *group_table,
+                      struct meter_table *meter_table,
                       const struct sbrec_chassis *chassis,
                       struct hmap *dhcp_opts,
                       struct hmap *dhcpv6_opts,
@@ -256,6 +259,7 @@  consider_logical_flow(struct controller_ctx *ctx,
         .is_switch = is_switch(ldp),
         .is_gateway_router = is_gateway_router(ldp, local_datapaths),
         .group_table = group_table,
+        .meter_table = meter_table,
 
         .pipeline = ingress ? OVNACT_P_INGRESS : OVNACT_P_EGRESS,
         .ingress_ptable = OFTABLE_LOG_INGRESS_PIPELINE,
@@ -428,13 +432,14 @@  lflow_run(struct controller_ctx *ctx,
           const struct chassis_index *chassis_index,
           const struct hmap *local_datapaths,
           struct group_table *group_table,
+          struct meter_table *meter_table,
           const struct shash *addr_sets,
           struct hmap *flow_table,
           struct sset *active_tunnels,
           struct sset *local_lport_ids)
 {
     add_logical_flows(ctx, chassis_index, local_datapaths,
-                      group_table, chassis, addr_sets, flow_table,
+                      group_table, meter_table, chassis, addr_sets, flow_table,
                       active_tunnels, local_lport_ids);
     add_neighbor_flows(ctx, flow_table);
 }
diff --git a/ovn/controller/lflow.h b/ovn/controller/lflow.h
index bfb7415e2..ad8d0a4c7 100644
--- a/ovn/controller/lflow.h
+++ b/ovn/controller/lflow.h
@@ -38,6 +38,7 @@ 
 struct chassis_index;
 struct controller_ctx;
 struct group_table;
+struct meter_table;
 struct hmap;
 struct sbrec_chassis;
 struct simap;
@@ -67,6 +68,7 @@  void lflow_run(struct controller_ctx *,
                const struct chassis_index *,
                const struct hmap *local_datapaths,
                struct group_table *group_table,
+               struct meter_table *meter_table,
                const struct shash *addr_sets,
                struct hmap *flow_table,
                struct sset *active_tunnels,
diff --git a/ovn/controller/ofctrl.c b/ovn/controller/ofctrl.c
index fc88a410b..d92258558 100644
--- a/ovn/controller/ofctrl.c
+++ b/ovn/controller/ofctrl.c
@@ -133,6 +133,9 @@  static struct hmap installed_flows;
 /* A reference to the group_table. */
 static struct group_table *groups;
 
+/* A reference to the meter_table. */
+static struct meter_table *meters;
+
 /* MFF_* field ID for our Geneve option.  In S_TLV_TABLE_MOD_SENT, this is
  * the option we requested (we don't know whether we obtained it yet).  In
  * S_CLEAR_FLOWS or S_UPDATE_FLOWS, this is really the option we have. */
@@ -144,13 +147,15 @@  static struct ofpbuf *encode_flow_mod(struct ofputil_flow_mod *);
 
 static struct ofpbuf *encode_group_mod(const struct ofputil_group_mod *);
 
+static struct ofpbuf *encode_meter_mod(const struct ofputil_meter_mod *);
+
 static void ovn_flow_table_clear(struct hmap *flow_table);
 static void ovn_flow_table_destroy(struct hmap *flow_table);
 
 static void ofctrl_recv(const struct ofp_header *, enum ofptype);
 
 void
-ofctrl_init(struct group_table *group_table)
+ofctrl_init(struct group_table *group_table, struct meter_table *meter_table)
 {
     swconn = rconn_create(5, 0, DSCP_DEFAULT, 1 << OFP13_VERSION);
     tx_counter = rconn_packet_counter_create();
@@ -158,6 +163,7 @@  ofctrl_init(struct group_table *group_table)
     ovs_list_init(&flow_updates);
     ovn_init_symtab(&symtab);
     groups = group_table;
+    meters = meter_table;
 }
 
 /* S_NEW, for a new connection.
@@ -388,6 +394,18 @@  run_S_CLEAR_FLOWS(void)
         ovn_group_table_clear(groups, true);
     }
 
+    /* Send a meter_mod to delete all meters. */
+    struct ofputil_meter_mod mm;
+    memset(&mm, 0, sizeof mm);
+    mm.command = OFPMC13_DELETE;
+    mm.meter.meter_id = OFPM13_ALL;
+    queue_msg(encode_meter_mod(&mm));
+
+    /* Clear existing meters, to match the state of the switch. */
+    if (meters) {
+        ovn_meter_table_clear(meters, true);
+    }
+
     /* All flow updates are irrelevant now. */
     struct ofctrl_flow_update *fup, *next;
     LIST_FOR_EACH_SAFE (fup, next, list_node, &flow_updates) {
@@ -797,7 +815,60 @@  add_group_mod(const struct ofputil_group_mod *gm, struct ovs_list *msgs)
     struct ofpbuf *msg = encode_group_mod(gm);
     ovs_list_push_back(msgs, &msg->list_node);
 }
-
+
+/* meter_table. */
+
+/* Finds and returns a meter_info in 'existing_meters' whose key is identical
+ * to 'target''s key, or NULL if there is none. */
+static struct meter_info *
+ovn_meter_lookup(struct hmap *exisiting_meters,
+                 const struct meter_info *target)
+{
+    struct meter_info *e;
+
+    HMAP_FOR_EACH_WITH_HASH(e, hmap_node, target->hmap_node.hash,
+                            exisiting_meters) {
+        if (e->meter_id == target->meter_id) {
+            return e;
+        }
+   }
+    return NULL;
+}
+
+/* Clear either desired_meters or existing_meters in meter_table. */
+void
+ovn_meter_table_clear(struct meter_table *meter_table, bool existing)
+{
+    struct meter_info *m, *next;
+    struct hmap *target_meter = existing
+                                ? &meter_table->existing_meters
+                                : &meter_table->desired_meters;
+
+    HMAP_FOR_EACH_SAFE (m, next, hmap_node, target_meter) {
+        hmap_remove(target_meter, &m->hmap_node);
+        /* Don't unset bitmap for desired meter_info if the meter_id
+         * was not freshly reserved. */
+        if (existing || m->new_meter_id) {
+            bitmap_set0(meter_table->meter_ids, m->meter_id);
+        }
+        ds_destroy(&m->meter);
+        free(m);
+    }
+}
+
+static struct ofpbuf *
+encode_meter_mod(const struct ofputil_meter_mod *mm)
+{
+    return ofputil_encode_meter_mod(OFP13_VERSION, mm);
+}
+
+static void
+add_meter_mod(const struct ofputil_meter_mod *mm, struct ovs_list *msgs)
+{
+    struct ofpbuf *msg = encode_meter_mod(mm);
+    ovs_list_push_back(msgs, &msg->list_node);
+}
+
 static void
 add_ct_flush_zone(uint16_t zone_id, struct ovs_list *msgs)
 {
@@ -833,6 +904,12 @@  ofctrl_can_put(void)
  * 'groups->desired_groups' and frees them. (The hmap itself isn't
  * destroyed.)
  *
+ * Replaces the meter table on the switch, if possible, by the contents of
+ * 'meters->desired_meters'.  Regardless of whether the meter table
+ * is updated, this deletes all the meters from the
+ * 'meters->desired_meters' and frees them. (The hmap itself isn't
+ * destroyed.)
+ *
  * Sends conntrack flush messages to each zone in 'pending_ct_zones' that
  * is in the CT_ZONE_OF_QUEUED state and then moves the zone into the
  * CT_ZONE_OF_SENT state.
@@ -891,6 +968,35 @@  ofctrl_put(struct hmap *flow_table, struct shash *pending_ct_zones,
         }
     }
 
+    /* Iterate through all the desired meters. If there are new ones,
+     * add them to the switch. */
+    struct meter_info *desired_meter;
+    HMAP_FOR_EACH(desired_meter, hmap_node, &meters->desired_meters) {
+        if (!ovn_meter_lookup(&meters->existing_meters, desired_meter)
+            && desired_meter->meter_id) {
+            /* Create and install new meter. */
+            struct ofputil_meter_mod mm;
+            enum ofputil_protocol usable_protocols;
+            char *error;
+            struct ds meter_string = DS_EMPTY_INITIALIZER;
+            ds_put_format(&meter_string, "meter=%u,%s",
+                          desired_meter->meter_id,
+                          ds_cstr(&desired_meter->meter));
+
+            error = parse_ofp_meter_mod_str(&mm, ds_cstr(&meter_string),
+                                            OFPMC13_ADD, &usable_protocols);
+            if (!error) {
+                add_meter_mod(&mm, &msgs);
+            } else {
+                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+                VLOG_ERR_RL(&rl, "new meter %s %s", error,
+                         ds_cstr(&meter_string));
+                free(error);
+            }
+            ds_destroy(&meter_string);
+        }
+    }
+
     /* Iterate through all of the installed flows.  If any of them are no
      * longer desired, delete them; if any of them should have different
      * actions, update them. */
@@ -1012,6 +1118,54 @@  ofctrl_put(struct hmap *flow_table, struct shash *pending_ct_zones,
         }
     }
 
+    /* Iterate through the installed meters from previous runs. If they
+     * are not needed delete them. */
+    struct meter_info *installed_meter, *next_meter;
+    HMAP_FOR_EACH_SAFE(installed_meter, next_meter, hmap_node,
+                       &meters->existing_meters) {
+        if (!ovn_meter_lookup(&meters->desired_meters, installed_meter)) {
+            /* Delete the meter. */
+            struct ofputil_meter_mod mm;
+            enum ofputil_protocol usable_protocols;
+            char *error;
+            struct ds meter_string = DS_EMPTY_INITIALIZER;
+            ds_put_format(&meter_string, "meter=%u", installed_meter->meter_id);
+
+            error = parse_ofp_meter_mod_str(&mm, ds_cstr(&meter_string),
+                                            OFPMC13_DELETE, &usable_protocols);
+            if (!error) {
+                add_meter_mod(&mm, &msgs);
+            } else {
+                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+                VLOG_ERR_RL(&rl, "Error deleting meter %d: %s",
+                         installed_meter->meter_id, error);
+                free(error);
+            }
+            ds_destroy(&meter_string);
+
+            /* Remove 'installed_meter' from 'meters->existing_meters' */
+            hmap_remove(&meters->existing_meters, &installed_meter->hmap_node);
+            ds_destroy(&installed_meter->meter);
+
+            /* Dealloc meter_id. */
+            bitmap_set0(meters->meter_ids, installed_meter->meter_id);
+            free(installed_meter);
+        }
+    }
+
+    /* Move the contents of desired_meters to existing_meters. */
+    HMAP_FOR_EACH_SAFE(desired_meter, next_meter, hmap_node,
+                       &meters->desired_meters) {
+        hmap_remove(&meters->desired_meters, &desired_meter->hmap_node);
+        if (!ovn_meter_lookup(&meters->existing_meters, desired_meter)) {
+            hmap_insert(&meters->existing_meters, &desired_meter->hmap_node,
+                        desired_meter->hmap_node.hash);
+        } else {
+           ds_destroy(&desired_meter->meter);
+           free(desired_meter);
+        }
+    }
+
     if (!ovs_list_is_empty(&msgs)) {
         /* Add a barrier to the list of messages. */
         struct ofpbuf *barrier = ofputil_encode_barrier_request(OFP13_VERSION);
diff --git a/ovn/controller/ofctrl.h b/ovn/controller/ofctrl.h
index d83f6aec4..e680e2d61 100644
--- a/ovn/controller/ofctrl.h
+++ b/ovn/controller/ofctrl.h
@@ -24,6 +24,7 @@ 
 
 struct controller_ctx;
 struct group_table;
+struct meter_table;
 struct hmap;
 struct match;
 struct ofpbuf;
@@ -31,7 +32,7 @@  struct ovsrec_bridge;
 struct shash;
 
 /* Interface for OVN main loop. */
-void ofctrl_init(struct group_table *group_table);
+void ofctrl_init(struct group_table *group_table, struct meter_table *meter_table);
 enum mf_field_id ofctrl_run(const struct ovsrec_bridge *br_int,
                             struct shash *pending_ct_zones);
 bool ofctrl_can_put(void);
@@ -58,4 +59,7 @@  void ofctrl_flow_table_clear(void);
 void ovn_group_table_clear(struct group_table *group_table,
                            bool existing);
 
+void ovn_meter_table_clear(struct meter_table *meter_table,
+                           bool existing);
+
 #endif /* ovn/ofctrl.h */
diff --git a/ovn/controller/ovn-controller.c b/ovn/controller/ovn-controller.c
index a935a791c..c5926bc83 100644
--- a/ovn/controller/ovn-controller.c
+++ b/ovn/controller/ovn-controller.c
@@ -599,9 +599,16 @@  main(int argc, char *argv[])
     hmap_init(&group_table.desired_groups);
     hmap_init(&group_table.existing_groups);
 
+    /* Initialize meter ids for QoS. */
+    struct meter_table meter_table;
+    meter_table.meter_ids = bitmap_allocate(MAX_OVN_METERS);
+    bitmap_set1(meter_table.meter_ids, 0); /* Meter id 0 is invalid. */
+    hmap_init(&meter_table.desired_meters);
+    hmap_init(&meter_table.existing_meters);
+
     daemonize_complete();
 
-    ofctrl_init(&group_table);
+    ofctrl_init(&group_table, &meter_table);
     pinctrl_init();
     lflow_init();
 
@@ -711,8 +718,8 @@  main(int argc, char *argv[])
                     struct hmap flow_table = HMAP_INITIALIZER(&flow_table);
                     lflow_run(&ctx, chassis,
                               &chassis_index, &local_datapaths, &group_table,
-                              &addr_sets, &flow_table, &active_tunnels,
-                              &local_lport_ids);
+                              &meter_table, &addr_sets, &flow_table,
+                              &active_tunnels, &local_lport_ids);
 
                     if (chassis_id) {
                         bfd_run(&ctx, br_int, chassis, &local_datapaths,
@@ -856,6 +863,18 @@  main(int argc, char *argv[])
     }
     hmap_destroy(&group_table.existing_groups);
 
+    bitmap_free(meter_table.meter_ids);
+    hmap_destroy(&meter_table.desired_meters);
+
+    struct meter_info *installed_meter, *next_meter;
+    HMAP_FOR_EACH_SAFE(installed_meter, next_meter, hmap_node,
+                       &meter_table.existing_meters) {
+        hmap_remove(&meter_table.existing_meters, &installed_meter->hmap_node);
+        ds_destroy(&installed_meter->meter);
+        free(installed_meter);
+    }
+    hmap_destroy(&meter_table.existing_meters);
+
     ovsdb_idl_loop_destroy(&ovs_idl_loop);
     ovsdb_idl_loop_destroy(&ovnsb_idl_loop);
 
diff --git a/ovn/lib/actions.c b/ovn/lib/actions.c
index d0d73b69c..9c1f5f963 100644
--- a/ovn/lib/actions.c
+++ b/ovn/lib/actions.c
@@ -1873,6 +1873,118 @@  ovnact_log_free(struct ovnact_log *log)
     free(log->name);
 }
 
+static void
+parse_set_meter_action(struct action_context *ctx)
+{
+    int rate;
+    int burst = 0;
+
+    if (ctx->pp->cur_ltable >= ctx->pp->n_tables) {
+        lexer_error(ctx->lexer,
+                    "\"set_meter\" action not allowed in last table.");
+        return;
+    }
+
+    lexer_force_match(ctx->lexer, LEX_T_LPAREN);
+    lexer_force_int(ctx->lexer, &rate);
+    if (lexer_match(ctx->lexer, LEX_T_COMMA)) {
+        lexer_force_int(ctx->lexer, &burst);
+    }
+    lexer_force_match(ctx->lexer, LEX_T_RPAREN);
+
+    struct ovnact_set_meter *cl = ovnact_put_SET_METER(ctx->ovnacts);
+    cl->rate = (uint32_t)rate;
+    cl->burst = (uint32_t)burst;
+}
+
+static void
+format_SET_METER(const struct ovnact_set_meter *cl, struct ds *s)
+{
+    if (cl->burst) {
+        ds_put_format(s, "set_meter(%d ,%d);", cl->rate, cl->burst);
+    } else {
+        ds_put_format(s, "set_meter(%d);", cl->rate);
+    }
+}
+
+static void
+encode_SET_METER(const struct ovnact_set_meter *cl,
+                 const struct ovnact_encode_params *ep,
+                 struct ofpbuf *ofpacts)
+{
+    uint32_t meter_id = 0, hash;
+    struct meter_info *meter_info;
+    struct ofpact_meter *om;
+
+    struct ds ds = DS_EMPTY_INITIALIZER;
+    if (cl->burst) {
+        ds_put_format(&ds,
+                      "kbps burst stats bands=type=drop rate=%d burst_size=%d",
+                      cl->rate, cl->burst);
+    } else {
+        ds_put_format(&ds, "kbps stats bands=type=drop rate=%d", cl->rate);
+    }
+
+    hash = hash_string(ds_cstr(&ds), 0);
+
+    /* Check whether we have non installed but allocated meter_id. */
+    HMAP_FOR_EACH_WITH_HASH (meter_info, hmap_node, hash,
+                             &ep->meter_table->desired_meters) {
+        if (!strcmp(ds_cstr(&meter_info->meter), ds_cstr(&ds))) {
+            meter_id = meter_info->meter_id;
+            break;
+        }
+    }
+
+    if (!meter_id) {
+        /* Check whether we already have an installed entry for this
+         * combination. */
+        HMAP_FOR_EACH_WITH_HASH (meter_info, hmap_node, hash,
+                                 &ep->meter_table->existing_meters) {
+            if (!strcmp(ds_cstr(&meter_info->meter), ds_cstr(&ds))) {
+                meter_id = meter_info->meter_id;
+            }
+        }
+
+        bool new_meter_id = false;
+        if (!meter_id) {
+            /* Reserve a new meter_id. */
+            meter_id = bitmap_scan(ep->meter_table->meter_ids, 0, 1,
+                                   MAX_OVN_METERS + 1);
+            new_meter_id = true;
+        }
+
+        if (meter_id == MAX_OVN_METERS + 1) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+            VLOG_ERR_RL(&rl, "out of meter ids");
+
+            ds_destroy(&ds);
+            return;
+        }
+        bitmap_set1(ep->meter_table->meter_ids, meter_id);
+
+        meter_info = xmalloc(sizeof *meter_info);
+        meter_info->meter = ds;
+        meter_info->meter_id = meter_id;
+        meter_info->hmap_node.hash = hash;
+        meter_info->new_meter_id = new_meter_id;
+
+        hmap_insert(&ep->meter_table->desired_meters,
+                    &meter_info->hmap_node, meter_info->hmap_node.hash);
+    } else {
+        ds_destroy(&ds);
+    }
+
+    /* Create an action to set the meter. */
+    om = ofpact_put_METER(ofpacts);
+    om->meter_id = meter_id;
+}
+
+static void
+ovnact_set_meter_free(struct ovnact_set_meter *ct OVS_UNUSED)
+{
+}
+
 /* Parses an assignment or exchange or put_dhcp_opts action. */
 static void
 parse_set_action(struct action_context *ctx)
@@ -1954,6 +2066,8 @@  parse_action(struct action_context *ctx)
         parse_SET_QUEUE(ctx);
     } else if (lexer_match_id(ctx->lexer, "log")) {
         parse_LOG(ctx);
+    } else if (lexer_match_id(ctx->lexer, "set_meter")) {
+        parse_set_meter_action(ctx);
     } else {
         lexer_syntax_error(ctx->lexer, "expecting action");
     }
diff --git a/ovn/northd/ovn-northd.8.xml b/ovn/northd/ovn-northd.8.xml
index 0d85ec0d2..5a2febb27 100644
--- a/ovn/northd/ovn-northd.8.xml
+++ b/ovn/northd/ovn-northd.8.xml
@@ -364,7 +364,28 @@ 
       </li>
     </ul>
 
-    <h3>Ingress Table 8: LB</h3>
+    <h3>Ingress Table 8: <code>from-lport</code> QoS meter</h3>
+
+    <p>
+      Logical flows in this table closely reproduce those in the
+      <code>QoS</code> table <code>bandwidth</code> column in the
+      <code>OVN_Northbound</code> database for the <code>from-lport</code>
+      direction.
+    </p>
+
+    <ul>
+      <li>
+        For each QoS rule on each logical switch, a flow will be added at
+        the priority specified in the QoS table.
+      </li>
+
+      <li>
+        One priority-0 fallback flow that matches all packets and advances to
+        the next table.
+      </li>
+    </ul>
+
+    <h3>Ingress Table 9: LB</h3>
 
     <p>
       It contains a priority-0 flow that simply moves traffic to the next
@@ -377,7 +398,7 @@ 
       connection.)
     </p>
 
-    <h3>Ingress Table 9: Stateful</h3>
+    <h3>Ingress Table 10: Stateful</h3>
 
     <ul>
       <li>
@@ -414,7 +435,7 @@ 
       </li>
     </ul>
 
-    <h3>Ingress Table 10: ARP/ND responder</h3>
+    <h3>Ingress Table 11: ARP/ND responder</h3>
 
     <p>
       This table implements ARP/ND responder in a logical switch for known
@@ -564,7 +585,7 @@  nd_na {
       </li>
     </ul>
 
-    <h3>Ingress Table 11: DHCP option processing</h3>
+    <h3>Ingress Table 12: DHCP option processing</h3>
 
     <p>
       This table adds the DHCPv4 options to a DHCPv4 packet from the
@@ -624,7 +645,7 @@  next;
       </li>
     </ul>
 
-    <h3>Ingress Table 12: DHCP responses</h3>
+    <h3>Ingress Table 13: DHCP responses</h3>
 
     <p>
       This table implements DHCP responder for the DHCP replies generated by
@@ -706,7 +727,7 @@  output;
       </li>
     </ul>
 
-    <h3>Ingress Table 13 DNS Lookup</h3>
+    <h3>Ingress Table 14 DNS Lookup</h3>
 
     <p>
       This table looks up and resolves the DNS names to the corresponding
@@ -735,7 +756,7 @@  reg0[4] = dns_lookup(); next;
       </li>
     </ul>
 
-    <h3>Ingress Table 14 DNS Responses</h3>
+    <h3>Ingress Table 15 DNS Responses</h3>
 
     <p>
       This table implements DNS responder for the DNS replies generated by
@@ -770,7 +791,7 @@  output;
       </li>
     </ul>
 
-    <h3>Ingress Table 15 Destination Lookup</h3>
+    <h3>Ingress Table 16 Destination Lookup</h3>
 
     <p>
       This table implements switching behavior.  It contains these logical
@@ -872,7 +893,14 @@  output;
       <code>to-lport</code> qos rules.
     </p>
 
-    <h3>Egress Table 6: Stateful</h3>
+    <h3>Egress Table 6: <code>to-lport</code> QoS meter</h3>
+
+    <p>
+      This is similar to ingress table <code>QoS meter</code> except for
+      <code>to-lport</code> qos rules.
+    </p>
+
+    <h3>Egress Table 7: Stateful</h3>
 
     <p>
       This is similar to ingress table <code>Stateful</code> except that
@@ -887,18 +915,18 @@  output;
         A priority 34000 logical flow is added for each logical port which
         has DHCPv4 options defined to allow the DHCPv4 reply packet and which has
         DHCPv6 options defined to allow the DHCPv6 reply packet from the
-        <code>Ingress Table 12: DHCP responses</code>.
+        <code>Ingress Table 13: DHCP responses</code>.
       </li>
 
       <li>
         A priority 34000 logical flow is added for each logical switch datapath
         configured with DNS records with the match <code>udp.dst = 53</code>
         to allow the DNS reply packet from the
-        <code>Ingress Table 14:DNS responses</code>.
+        <code>Ingress Table 15: DNS responses</code>.
       </li>
     </ul>
 
-    <h3>Egress Table 7: Egress Port Security - IP</h3>
+    <h3>Egress Table 8: Egress Port Security - IP</h3>
 
     <p>
       This is similar to the port security logic in table
@@ -908,7 +936,7 @@  output;
       <code>ip4.src</code> and <code>ip6.src</code>
     </p>
 
-    <h3>Egress Table 8: Egress Port Security - L2</h3>
+    <h3>Egress Table 9: Egress Port Security - L2</h3>
 
     <p>
       This is similar to the ingress port security logic in ingress table
diff --git a/ovn/northd/ovn-northd.c b/ovn/northd/ovn-northd.c
index 2db238073..4c0d6fcd0 100644
--- a/ovn/northd/ovn-northd.c
+++ b/ovn/northd/ovn-northd.c
@@ -108,25 +108,27 @@  enum ovn_stage {
     PIPELINE_STAGE(SWITCH, IN,  PRE_STATEFUL,   5, "ls_in_pre_stateful")  \
     PIPELINE_STAGE(SWITCH, IN,  ACL,            6, "ls_in_acl")           \
     PIPELINE_STAGE(SWITCH, IN,  QOS_MARK,       7, "ls_in_qos_mark")      \
-    PIPELINE_STAGE(SWITCH, IN,  LB,             8, "ls_in_lb")            \
-    PIPELINE_STAGE(SWITCH, IN,  STATEFUL,       9, "ls_in_stateful")      \
-    PIPELINE_STAGE(SWITCH, IN,  ARP_ND_RSP,    10, "ls_in_arp_rsp")       \
-    PIPELINE_STAGE(SWITCH, IN,  DHCP_OPTIONS,  11, "ls_in_dhcp_options")  \
-    PIPELINE_STAGE(SWITCH, IN,  DHCP_RESPONSE, 12, "ls_in_dhcp_response") \
-    PIPELINE_STAGE(SWITCH, IN,  DNS_LOOKUP,      13, "ls_in_dns_lookup") \
-    PIPELINE_STAGE(SWITCH, IN,  DNS_RESPONSE,  14, "ls_in_dns_response") \
-    PIPELINE_STAGE(SWITCH, IN,  L2_LKUP,       15, "ls_in_l2_lkup")       \
-                                                                      \
-    /* Logical switch egress stages. */                               \
-    PIPELINE_STAGE(SWITCH, OUT, PRE_LB,       0, "ls_out_pre_lb")     \
-    PIPELINE_STAGE(SWITCH, OUT, PRE_ACL,      1, "ls_out_pre_acl")     \
-    PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2, "ls_out_pre_stateful")  \
-    PIPELINE_STAGE(SWITCH, OUT, LB,           3, "ls_out_lb")            \
+    PIPELINE_STAGE(SWITCH, IN,  QOS_METER,      8, "ls_in_qos_meter")     \
+    PIPELINE_STAGE(SWITCH, IN,  LB,             9, "ls_in_lb")            \
+    PIPELINE_STAGE(SWITCH, IN,  STATEFUL,      10, "ls_in_stateful")      \
+    PIPELINE_STAGE(SWITCH, IN,  ARP_ND_RSP,    11, "ls_in_arp_rsp")       \
+    PIPELINE_STAGE(SWITCH, IN,  DHCP_OPTIONS,  12, "ls_in_dhcp_options")  \
+    PIPELINE_STAGE(SWITCH, IN,  DHCP_RESPONSE, 13, "ls_in_dhcp_response") \
+    PIPELINE_STAGE(SWITCH, IN,  DNS_LOOKUP,    14, "ls_in_dns_lookup")    \
+    PIPELINE_STAGE(SWITCH, IN,  DNS_RESPONSE,  15, "ls_in_dns_response")  \
+    PIPELINE_STAGE(SWITCH, IN,  L2_LKUP,       16, "ls_in_l2_lkup")       \
+                                                                          \
+    /* Logical switch egress stages. */                                   \
+    PIPELINE_STAGE(SWITCH, OUT, PRE_LB,       0, "ls_out_pre_lb")         \
+    PIPELINE_STAGE(SWITCH, OUT, PRE_ACL,      1, "ls_out_pre_acl")        \
+    PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2, "ls_out_pre_stateful")   \
+    PIPELINE_STAGE(SWITCH, OUT, LB,           3, "ls_out_lb")             \
     PIPELINE_STAGE(SWITCH, OUT, ACL,          4, "ls_out_acl")            \
     PIPELINE_STAGE(SWITCH, OUT, QOS_MARK,     5, "ls_out_qos_mark")       \
-    PIPELINE_STAGE(SWITCH, OUT, STATEFUL,     6, "ls_out_stateful")       \
-    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP,  7, "ls_out_port_sec_ip")    \
-    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2,  8, "ls_out_port_sec_l2")    \
+    PIPELINE_STAGE(SWITCH, OUT, QOS_METER,    6, "ls_out_qos_meter")      \
+    PIPELINE_STAGE(SWITCH, OUT, STATEFUL,     7, "ls_out_stateful")       \
+    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP,  8, "ls_out_port_sec_ip")    \
+    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2,  9, "ls_out_port_sec_l2")    \
                                                                       \
     /* Logical router ingress stages. */                              \
     PIPELINE_STAGE(ROUTER, IN,  ADMISSION,   0, "lr_in_admission")    \
@@ -3365,21 +3367,49 @@  static void
 build_qos(struct ovn_datapath *od, struct hmap *lflows) {
     ovn_lflow_add(lflows, od, S_SWITCH_IN_QOS_MARK, 0, "1", "next;");
     ovn_lflow_add(lflows, od, S_SWITCH_OUT_QOS_MARK, 0, "1", "next;");
+    ovn_lflow_add(lflows, od, S_SWITCH_IN_QOS_METER, 0, "1", "next;");
+    ovn_lflow_add(lflows, od, S_SWITCH_OUT_QOS_METER, 0, "1", "next;");
 
     for (size_t i = 0; i < od->nbs->n_qos_rules; i++) {
         struct nbrec_qos *qos = od->nbs->qos_rules[i];
         bool ingress = !strcmp(qos->direction, "from-lport") ? true :false;
         enum ovn_stage stage = ingress ? S_SWITCH_IN_QOS_MARK : S_SWITCH_OUT_QOS_MARK;
+        uint32_t rate = 0;
+        uint32_t burst = 0;
+
+        for (size_t j = 0; j < qos->n_action; j++) {
+            if (!strcmp(qos->key_action[j], "dscp")) {
+                struct ds dscp_action = DS_EMPTY_INITIALIZER;
+
+                ds_put_format(&dscp_action, "ip.dscp = %d; next;",
+                              (uint8_t)qos->value_action[j]);
+                ovn_lflow_add(lflows, od, stage,
+                              qos->priority,
+                              qos->match, ds_cstr(&dscp_action));
+                ds_destroy(&dscp_action);
+            }
+        }
 
-        if (!strcmp(qos->key_action, "dscp")) {
-            struct ds dscp_action = DS_EMPTY_INITIALIZER;
-
-            ds_put_format(&dscp_action, "ip.dscp = %d; next;",
-                          (uint8_t)qos->value_action);
+        for (size_t n = 0; n < qos->n_bandwidth; n++) {
+            if (!strcmp(qos->key_bandwidth[n], "rate")) {
+                rate = (uint32_t)qos->value_bandwidth[n];
+            } else if (!strcmp(qos->key_bandwidth[n], "burst")) {
+                burst = (uint32_t)qos->value_bandwidth[n];
+            }
+        }
+        if (rate) {
+            struct ds meter_action = DS_EMPTY_INITIALIZER;
+            stage = ingress ? S_SWITCH_IN_QOS_METER : S_SWITCH_OUT_QOS_METER;
+            if (burst) {
+                ds_put_format(&meter_action, "set_meter(%d, %d); next;",
+                              rate, burst);
+            } else {
+                ds_put_format(&meter_action, "set_meter(%d); next;", rate);
+            }
             ovn_lflow_add(lflows, od, stage,
                           qos->priority,
-                          qos->match, ds_cstr(&dscp_action));
-            ds_destroy(&dscp_action);
+                          qos->match, ds_cstr(&meter_action));
+            ds_destroy(&meter_action);
         }
     }
 }
@@ -3489,7 +3519,7 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
     struct ds actions = DS_EMPTY_INITIALIZER;
 
     /* Build pre-ACL and ACL tables for both ingress and egress.
-     * Ingress tables 3 through 9.  Egress tables 0 through 6. */
+     * Ingress tables 3 through 10.  Egress tables 0 through 7. */
     struct ovn_datapath *od;
     HMAP_FOR_EACH (od, key_node, datapaths) {
         if (!od->nbs) {
@@ -3572,7 +3602,7 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_IP, 0, "1", "next;");
     }
 
-    /* Ingress table 10: ARP/ND responder, skip requests coming from localnet
+    /* Ingress table 11: ARP/ND responder, skip requests coming from localnet
      * and vtep ports. (priority 100); see ovn-northd.8.xml for the
      * rationale. */
     HMAP_FOR_EACH (op, key_node, ports) {
@@ -3589,7 +3619,7 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         }
     }
 
-    /* Ingress table 10: ARP/ND responder, reply for known IPs.
+    /* Ingress table 11: ARP/ND responder, reply for known IPs.
      * (priority 50). */
     HMAP_FOR_EACH (op, key_node, ports) {
         if (!op->nbsp) {
@@ -3684,7 +3714,7 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         }
     }
 
-    /* Ingress table 10: ARP/ND responder, by default goto next.
+    /* Ingress table 11: ARP/ND responder, by default goto next.
      * (priority 0)*/
     HMAP_FOR_EACH (od, key_node, datapaths) {
         if (!od->nbs) {
@@ -3694,7 +3724,7 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         ovn_lflow_add(lflows, od, S_SWITCH_IN_ARP_ND_RSP, 0, "1", "next;");
     }
 
-    /* Logical switch ingress table 11 and 12: DHCP options and response
+    /* Logical switch ingress table 12 and 13: DHCP options and response
          * priority 100 flows. */
     HMAP_FOR_EACH (op, key_node, ports) {
         if (!op->nbsp) {
@@ -3796,7 +3826,7 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         }
     }
 
-    /* Logical switch ingress table 13 and 14: DNS lookup and response
+    /* Logical switch ingress table 14 and 15: DNS lookup and response
      * priority 100 flows.
      */
     HMAP_FOR_EACH (od, key_node, datapaths) {
@@ -3828,9 +3858,9 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         ds_destroy(&action);
     }
 
-    /* Ingress table 11 and 12: DHCP options and response, by default goto next.
+    /* Ingress table 12 and 13: DHCP options and response, by default goto next.
      * (priority 0).
-     * Ingress table 13 and 14: DNS lookup and response, by default goto next.
+     * Ingress table 14 and 15: DNS lookup and response, by default goto next.
      * (priority 0).*/
 
     HMAP_FOR_EACH (od, key_node, datapaths) {
@@ -3844,7 +3874,7 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 0, "1", "next;");
     }
 
-    /* Ingress table 15: Destination lookup, broadcast and multicast handling
+    /* Ingress table 16: Destination lookup, broadcast and multicast handling
      * (priority 100). */
     HMAP_FOR_EACH (op, key_node, ports) {
         if (!op->nbsp) {
@@ -3864,7 +3894,7 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                       "outport = \""MC_FLOOD"\"; output;");
     }
 
-    /* Ingress table 13: Destination lookup, unicast handling (priority 50), */
+    /* Ingress table 16: Destination lookup, unicast handling (priority 50), */
     HMAP_FOR_EACH (op, key_node, ports) {
         if (!op->nbsp) {
             continue;
@@ -3964,7 +3994,7 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         }
     }
 
-    /* Ingress table 13: Destination lookup for unknown MACs (priority 0). */
+    /* Ingress table 16: Destination lookup for unknown MACs (priority 0). */
     HMAP_FOR_EACH (od, key_node, datapaths) {
         if (!od->nbs) {
             continue;
@@ -3976,8 +4006,8 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         }
     }
 
-    /* Egress tables 6: Egress port security - IP (priority 0)
-     * Egress table 7: Egress port security L2 - multicast/broadcast
+    /* Egress tables 8: Egress port security - IP (priority 0)
+     * Egress table 9: Egress port security L2 - multicast/broadcast
      *                 (priority 100). */
     HMAP_FOR_EACH (od, key_node, datapaths) {
         if (!od->nbs) {
@@ -3989,10 +4019,10 @@  build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                       "output;");
     }
 
-    /* Egress table 6: Egress port security - IP (priorities 90 and 80)
+    /* Egress table 8: Egress port security - IP (priorities 90 and 80)
      * if port security enabled.
      *
-     * Egress table 7: Egress port security - L2 (priorities 50 and 150).
+     * Egress table 9: Egress port security - L2 (priorities 50 and 150).
      *
      * Priority 50 rules implement port security for enabled logical port.
      *
diff --git a/ovn/ovn-nb.ovsschema b/ovn/ovn-nb.ovsschema
index a077bfb81..23ab008cd 100644
--- a/ovn/ovn-nb.ovsschema
+++ b/ovn/ovn-nb.ovsschema
@@ -1,7 +1,7 @@ 
 {
     "name": "OVN_Northbound",
     "version": "5.8.0",
-    "cksum": "2812300190 16766",
+    "cksum": "3886656843 17257",
     "tables": {
         "NB_Global": {
             "columns": {
@@ -164,7 +164,14 @@ 
                                             "enum": ["set", ["dscp"]]},
                                     "value": {"type": "integer",
                                               "minInteger": 0,
-                                              "maxInteger": 63}}},
+                                              "maxInteger": 63},
+                                    "min": 0, "max": "unlimited"}},
+                "bandwidth": {"type": {"key": {"type": "string",
+                                               "enum": ["set", ["rate", "burst"]]},
+                                       "value": {"type": "integer",
+                                                 "minInteger": 1,
+                                                 "maxInteger": 65535},
+                                       "min": 0, "max": "unlimited"}},
                 "external_ids": {
                     "type": {"key": "string", "value": "string",
                              "min": 0, "max": "unlimited"}}},
diff --git a/ovn/ovn-nb.xml b/ovn/ovn-nb.xml
index 9869d7ed7..c666ec2c4 100644
--- a/ovn/ovn-nb.xml
+++ b/ovn/ovn-nb.xml
@@ -1250,6 +1250,22 @@ 
       </ul>
     </column>
 
+    <column name="bandwidth">
+      <p>
+         The bandwidth limit to be enforced on packets that match this rule.
+         Currently this is only supported by the userspace datapath with DPDK.
+      </p>
+      <ul>
+        <li>
+          <code>rate</code>: The value of the rate limit.
+        </li>
+        <li>
+          <code>burst</code>: The value of the burst rate limit.  This is
+          optional and requires <code>rate</code> to be specified as well.
+        </li>
+      </ul>
+    </column>
+
     <column name="external_ids">
       See <em>External IDs</em> at the beginning of this document.
     </column>
diff --git a/ovn/ovn-sb.xml b/ovn/ovn-sb.xml
index 0a894f8cb..ee1db3f6f 100644
--- a/ovn/ovn-sb.xml
+++ b/ovn/ovn-sb.xml
@@ -1516,6 +1516,21 @@ 
             <b>Prerequisite:</b> <code>udp</code>
           </p>
         </dd>
+
+        <dt><code>set_meter(<var>rate</var>);</code></dt>
+        <dt><code>set_meter(<var>rate</var>, <var>burst</var>);</code></dt>
+        <dd>
+          <p>
+            <b>Parameters</b>: integer rate limit <var>rate</var>, optional
+            integer burst rate limit <var>burst</var>.
+          </p>
+
+          <p>
+            This action sets the rate limit for a flow.
+          </p>
+
+          <p><b>Example:</b> <code>set_meter(100, 1000);</code></p>
+        </dd>
       </dl>
 
       <dl>
diff --git a/ovn/utilities/ovn-trace.c b/ovn/utilities/ovn-trace.c
index 59083eebe..bb5593876 100644
--- a/ovn/utilities/ovn-trace.c
+++ b/ovn/utilities/ovn-trace.c
@@ -1833,6 +1833,10 @@  trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
         case OVNACT_LOG:
             execute_log(ovnact_get_LOG(a), uflow, super);
             break;
+
+        case OVNACT_SET_METER:
+            /* Nothing to do. */
+            break;
         }
 
     }
diff --git a/tests/ovn.at b/tests/ovn.at
index 6c38b973f..0e34f7df4 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -5885,7 +5885,7 @@  OVN_CLEANUP([hv])
 AT_CLEANUP
 
 
-AT_SETUP([ovn -- DSCP marking check])
+AT_SETUP([ovn -- DSCP marking and meter check])
 AT_KEYWORDS([ovn])
 ovn_start
 
@@ -5952,13 +5952,16 @@  AT_CHECK([get_final_nw_tos], [0], [none
 check_tos 0
 
 # Mark DSCP with a valid value
-qos_id=$(ovn-nbctl --wait=hv -- --id=@lp1-qos create QoS priority=100 action=dscp=48 match="inport\=\=\"lp1\"" direction="from-lport" -- set Logical_Switch lsw0 qos_rules=@lp1-qos)
+qos_id=$(ovn-nbctl --wait=hv -- --id=@lp1-qos create QoS priority=100 action=dscp=48 bandwidth=rate=100,burst=1000 match="inport\=\=\"lp1\"" direction="from-lport" -- set Logical_Switch lsw0 qos_rules=@lp1-qos)
 check_tos 48
 
 # Update the DSCP marking
 ovn-nbctl --wait=hv set QoS $qos_id action=dscp=63
 check_tos 63
 
+# Update the meter rate
+ovn-nbctl --wait=hv set QoS $qos_id bandwidth=rate=65535,burst=65535
+
 ovn-nbctl --wait=hv set QoS $qos_id match="outport\=\=\"lp2\"" direction="to-lport"
 check_tos 63
 
diff --git a/tests/test-ovn.c b/tests/test-ovn.c
index 4beb2b8d6..e9dcba231 100644
--- a/tests/test-ovn.c
+++ b/tests/test-ovn.c
@@ -1206,6 +1206,13 @@  test_parse_actions(struct ovs_cmdl_context *ctx OVS_UNUSED)
     hmap_init(&group_table.desired_groups);
     hmap_init(&group_table.existing_groups);
 
+    /* Initialize meter ids for QoS. */
+    struct meter_table meter_table;
+    meter_table.meter_ids = bitmap_allocate(MAX_OVN_METERS);
+    bitmap_set1(meter_table.meter_ids, 0); /* Meter id 0 is invalid. */
+    hmap_init(&meter_table.desired_meters);
+    hmap_init(&meter_table.existing_meters);
+
     simap_init(&ports);
     simap_put(&ports, "eth0", 5);
     simap_put(&ports, "eth1", 6);
@@ -1244,6 +1251,7 @@  test_parse_actions(struct ovs_cmdl_context *ctx OVS_UNUSED)
                 .aux = &ports,
                 .is_switch = true,
                 .group_table = &group_table,
+                .meter_table = &meter_table,
 
                 .pipeline = OVNACT_P_INGRESS,
                 .ingress_ptable = 8,