[ovs-dev,v10,1/5] dpif-netdev: Use microsecond granularity.

Message ID 1515755828-1848-2-git-send-email-i.maximets@samsung.com
State Superseded
Series Output packet batching (Time-based).

Commit Message

Ilya Maximets Jan. 12, 2018, 11:17 a.m. UTC
Upcoming time-based output batching will require microsecond
granularity for its flexible configuration.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 lib/dpif-netdev.c | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)
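
For readers skimming the diff, here is a minimal standalone sketch of the unit convention the patch adopts. The struct and helper names (pmd_ctx, ctx_time_update(), ctx_now_msec(), xps_expired()) and the gettimeofday()-based stub are illustrative stand-ins, not the actual OVS code; they only show how the cached PMD timestamp moves to microseconds while call sites that still expect milliseconds divide by 1000 at the boundary.

#include <stdbool.h>
#include <sys/time.h>

#define XPS_TIMEOUT 500000LL        /* In microseconds (was XPS_TIMEOUT_MS, 500 ms). */

/* Illustrative stand-in for OVS's time_usec(); the real helper is monotonic. */
static long long
time_usec(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return (long long) tv.tv_sec * 1000000LL + tv.tv_usec;
}

struct pmd_ctx {
    long long now;                  /* Cached time, now kept in microseconds. */
};

static inline void
ctx_time_update(struct pmd_ctx *ctx)
{
    ctx->now = time_usec();         /* Previously time_msec(). */
}

/* Consumers that still expect milliseconds (flow 'used' timestamps,
 * conntrack_execute(), meter deltas) get ctx->now / 1000 at the boundary. */
static inline long long
ctx_now_msec(const struct pmd_ctx *ctx)
{
    return ctx->now / 1000;
}

/* XPS queue ids expire when idle: both sides of the comparison are now in
 * microseconds, which is why the timeout constant grows by a factor of 1000. */
static inline bool
xps_expired(const struct pmd_ctx *ctx, long long last_used)
{
    return ctx->now - last_used >= XPS_TIMEOUT;
}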

Comments

Jan Scheurich Jan. 12, 2018, 3:53 p.m. UTC | #1
Acked-by: Jan Scheurich <jan.scheurich@ericsson.com>

> -----Original Message-----
> From: Ilya Maximets [mailto:i.maximets@samsung.com]
> Sent: Friday, 12 January, 2018 12:17
> To: ovs-dev@openvswitch.org
> Cc: Heetae Ahn <heetae82.ahn@samsung.com>; Bhanuprakash Bodireddy <bhanuprakash.bodireddy@intel.com>; Antonio Fischetti
> <antonio.fischetti@intel.com>; Eelco Chaudron <echaudro@redhat.com>; Ciara Loftus <ciara.loftus@intel.com>; Kevin Traynor
> <ktraynor@redhat.com>; Jan Scheurich <jan.scheurich@ericsson.com>; Billy O'Mahony <billy.o.mahony@intel.com>; Ian Stokes
> <ian.stokes@intel.com>; Ilya Maximets <i.maximets@samsung.com>
> Subject: [PATCH v10 1/5] dpif-netdev: Use microsecond granularity.
> 
> Upcoming time-based output batching will require microsecond
> granularity for its flexible configuration.
> 
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>  lib/dpif-netdev.c | 27 ++++++++++++++-------------
>  1 file changed, 14 insertions(+), 13 deletions(-)
> 
Stokes, Ian Jan. 13, 2018, 1:34 p.m. UTC | #2
LGTM,

Acked.

> -----Original Message-----
> From: Ilya Maximets [mailto:i.maximets@samsung.com]
> Sent: Friday, January 12, 2018 11:17 AM
> To: ovs-dev@openvswitch.org
> Cc: Heetae Ahn <heetae82.ahn@samsung.com>; Bodireddy, Bhanuprakash
> <bhanuprakash.bodireddy@intel.com>; Fischetti, Antonio
> <antonio.fischetti@intel.com>; Eelco Chaudron <echaudro@redhat.com>;
> Loftus, Ciara <ciara.loftus@intel.com>; Kevin Traynor
> <ktraynor@redhat.com>; Jan Scheurich <jan.scheurich@ericsson.com>; O
> Mahony, Billy <billy.o.mahony@intel.com>; Stokes, Ian
> <ian.stokes@intel.com>; Ilya Maximets <i.maximets@samsung.com>
> Subject: [PATCH v10 1/5] dpif-netdev: Use microsecond granularity.
> 
> Upcoming time-based output batching will require microsecond granularity
> for its flexible configuration.
> 
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>  lib/dpif-netdev.c | 27 ++++++++++++++-------------
>  1 file changed, 14 insertions(+), 13 deletions(-)
> 

Patch

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index dc26026..b35700d 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -180,12 +180,13 @@  struct emc_cache {
 
 /* Simple non-wildcarding single-priority classifier. */
 
-/* Time in ms between successive optimizations of the dpcls subtable vector */
-#define DPCLS_OPTIMIZATION_INTERVAL 1000
+/* Time in microseconds between successive optimizations of the dpcls
+ * subtable vector */
+#define DPCLS_OPTIMIZATION_INTERVAL 1000000LL
 
-/* Time in ms of the interval in which rxq processing cycles used in
- * rxq to pmd assignments is measured and stored. */
-#define PMD_RXQ_INTERVAL_LEN 10000
+/* Time in microseconds of the interval in which rxq processing cycles used
+ * in rxq to pmd assignments is measured and stored. */
+#define PMD_RXQ_INTERVAL_LEN 10000000LL
 
 /* Number of intervals for which cycles are stored
  * and used during rxq to pmd assignment. */
@@ -341,7 +342,7 @@  enum rxq_cycles_counter_type {
     RXQ_N_CYCLES
 };
 
-#define XPS_TIMEOUT_MS 500LL
+#define XPS_TIMEOUT 500000LL    /* In microseconds. */
 
 /* Contained by struct dp_netdev_port's 'rxqs' member.  */
 struct dp_netdev_rxq {
@@ -758,7 +759,7 @@  emc_cache_slow_sweep(struct emc_cache *flow_cache)
 static inline void
 pmd_thread_ctx_time_update(struct dp_netdev_pmd_thread *pmd)
 {
-    pmd->ctx.now = time_msec();
+    pmd->ctx.now = time_usec();
 }
 
 /* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
@@ -4145,7 +4146,7 @@  dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
     memset(exceeded_rate, 0, cnt * sizeof *exceeded_rate);
 
     /* All packets will hit the meter at the same time. */
-    long_delta_t = (now - meter->used); /* msec */
+    long_delta_t = (now - meter->used) / 1000; /* msec */
 
     /* Make sure delta_t will not be too large, so that bucket will not
      * wrap around below. */
@@ -4301,7 +4302,7 @@  dpif_netdev_meter_set(struct dpif *dpif, ofproto_meter_id *meter_id,
         meter->flags = config->flags;
         meter->n_bands = config->n_bands;
         meter->max_delta_t = 0;
-        meter->used = time_msec();
+        meter->used = time_usec();
 
         /* set up bands */
         for (i = 0; i < config->n_bands; ++i) {
@@ -4843,7 +4844,7 @@  packet_batch_per_flow_execute(struct packet_batch_per_flow *batch,
     struct dp_netdev_flow *flow = batch->flow;
 
     dp_netdev_flow_used(flow, batch->array.count, batch->byte_count,
-                        batch->tcp_flags, pmd->ctx.now);
+                        batch->tcp_flags, pmd->ctx.now / 1000);
 
     actions = dp_netdev_flow_get_actions(flow);
 
@@ -5228,7 +5229,7 @@  dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
             continue;
         }
         interval = pmd->ctx.now - tx->last_used;
-        if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT_MS)) {
+        if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT)) {
             port = tx->port;
             ovs_mutex_lock(&port->txq_used_mutex);
             port->txq_used[tx->qid]--;
@@ -5249,7 +5250,7 @@  dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
     interval = pmd->ctx.now - tx->last_used;
     tx->last_used = pmd->ctx.now;
 
-    if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT_MS)) {
+    if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT)) {
         return tx->qid;
     }
 
@@ -5628,7 +5629,7 @@  dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
         conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type, force,
                           commit, zone, setmark, setlabel, aux->flow->tp_src,
                           aux->flow->tp_dst, helper, nat_action_info_ref,
-                          pmd->ctx.now);
+                          pmd->ctx.now / 1000);
         break;
     }
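
A subtle spot above is the meter path: meter->used is now recorded with time_usec(), but the token-bucket arithmetic downstream still works per millisecond, hence the "/ 1000" when computing the delta. A simplified sketch of that bookkeeping (the struct and function names below are illustrative, and the clamp only mirrors the existing max_delta_t check rather than reproducing the real band loop):

#include <stdint.h>

struct toy_meter {
    long long used;            /* Last time the meter was hit, in usec. */
    uint32_t max_delta_t;      /* Largest usable delta, in msec. */
};

/* Returns the elapsed time in milliseconds, clamped so the token-bucket
 * math that follows cannot wrap. */
static uint32_t
meter_delta_msec(const struct toy_meter *meter, long long now_usec)
{
    long long delta = (now_usec - meter->used) / 1000;   /* usec -> msec */

    if (delta > (long long) meter->max_delta_t) {
        delta = meter->max_delta_t;
    }
    return (uint32_t) delta;
}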