@@ -631,6 +631,34 @@ static inline bool tc_in_hw(u32 flags)
return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
+static inline enum tca_cls_hw_stats_type
+tc_hw_stats_type_get(struct nlattr *hw_stats_type_attr)
+{
+ /* If the user did not pass the attr, the type does not
+ * matter to them. Return "any" in that case.
+ */
+ return hw_stats_type_attr ? nla_get_u8(hw_stats_type_attr) :
+ TCA_CLS_HW_STATS_TYPE_ANY;
+}
+
+static inline enum flow_cls_hw_stats_type
+tc_flow_cls_hw_stats_type(enum tca_cls_hw_stats_type hw_stats_type)
+{
+ switch (hw_stats_type) {
+ default:
+ WARN_ON_ONCE(1); /* value comes from userspace; warn once, map to ANY */
+ /* fall-through */
+ case TCA_CLS_HW_STATS_TYPE_ANY:
+ return FLOW_CLS_HW_STATS_TYPE_ANY;
+ case TCA_CLS_HW_STATS_TYPE_IMMEDIATE:
+ return FLOW_CLS_HW_STATS_TYPE_IMMEDIATE;
+ case TCA_CLS_HW_STATS_TYPE_DELAYED:
+ return FLOW_CLS_HW_STATS_TYPE_DELAYED;
+ case TCA_CLS_HW_STATS_TYPE_DISABLED:
+ return FLOW_CLS_HW_STATS_TYPE_DISABLED;
+ }
+}
+
static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
const struct tcf_proto *tp, u32 flags,
@@ -180,6 +180,31 @@ enum {
#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */
#define TCA_CLS_FLAGS_VERBOSE (1 << 4) /* verbose logging */
+/* tca HW stats type */
+enum tca_cls_hw_stats_type {
+ TCA_CLS_HW_STATS_TYPE_ANY, /* User does not care; this is the
+ * default when the attr is not passed.
+ * Instructs the driver that the user
+ * does not care whether the HW stats
+ * are "immediate" or "delayed".
+ */
+ TCA_CLS_HW_STATS_TYPE_IMMEDIATE, /* Means that in dump, user gets
+ * the current HW stats state from
+ * the device queried at the dump time.
+ */
+ TCA_CLS_HW_STATS_TYPE_DELAYED, /* Means that in dump, the user gets
+ * HW stats that might be out of date
+ * for some time, maybe a couple of
+ * seconds. This is the case when the
+ * driver polls stats updates
+ * periodically or when it gets an
+ * async stats update from the device.
+ */
+ TCA_CLS_HW_STATS_TYPE_DISABLED, /* User is not interested in getting
+ * any HW statistics.
+ */
+};
+
/* U32 filters */
#define TC_U32_HTID(h) ((h)&0xFFF00000)
@@ -553,6 +578,8 @@ enum {
TCA_FLOWER_KEY_CT_LABELS, /* u128 */
TCA_FLOWER_KEY_CT_LABELS_MASK, /* u128 */
+ TCA_FLOWER_HW_STATS_TYPE, /* u8 */
+
__TCA_FLOWER_MAX,
};
@@ -113,6 +113,7 @@ struct cls_fl_filter {
u32 handle;
u32 flags;
u32 in_hw_count;
+ enum tca_cls_hw_stats_type hw_stats_type;
struct rcu_work rwork;
struct net_device *hw_dev;
/* Flower classifier is unlocked, which means that its reference counter
@@ -442,6 +443,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
return -ENOMEM;
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
+ cls_flower.common.hw_stats_type =
+ tc_flow_cls_hw_stats_type(f->hw_stats_type);
cls_flower.command = FLOW_CLS_REPLACE;
cls_flower.cookie = (unsigned long) f;
cls_flower.rule->match.dissector = &f->mask->dissector;
@@ -691,6 +694,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
.len = 128 / BITS_PER_BYTE },
[TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
+ [TCA_FLOWER_HW_STATS_TYPE] = NLA_POLICY_MAX(NLA_U8, TCA_CLS_HW_STATS_TYPE_DISABLED),
};
static const struct nla_policy
@@ -1774,6 +1778,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
}
+ fnew->hw_stats_type =
+ tc_hw_stats_type_get(tb[TCA_FLOWER_HW_STATS_TYPE]);
+
err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
tp->chain->tmplt_priv, rtnl_held, extack);
if (err)
@@ -1992,6 +1999,8 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
extack);
+ cls_flower.common.hw_stats_type =
+ tc_flow_cls_hw_stats_type(f->hw_stats_type);
cls_flower.command = add ?
FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
cls_flower.cookie = (unsigned long)f;
@@ -2714,6 +2723,9 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
goto nla_put_failure_locked;
+ if (f->hw_stats_type && nla_put_u8(skb, TCA_FLOWER_HW_STATS_TYPE, f->hw_stats_type))
+ goto nla_put_failure_locked;
+
spin_unlock(&tp->lock);
if (!skip_hw)