From patchwork Wed Jun 28 11:54:51 2017
X-Patchwork-Submitter: "Reshetova, Elena"
X-Patchwork-Id: 781609
X-Patchwork-Delegate: davem@davemloft.net
From: Elena Reshetova
To: netdev@vger.kernel.org
Cc: bridge@lists.linux-foundation.org, linux-kernel@vger.kernel.org,
    kuznet@ms2.inr.ac.ru, jmorris@namei.org, kaber@trash.net,
    stephen@networkplumber.org, peterz@infradead.org, keescook@chromium.org,
    Elena Reshetova, Hans Liljestrand, David Windsor
Subject: [PATCH 02/17] net: convert neighbour.refcnt from atomic_t to refcount_t
Date: Wed, 28 Jun 2017 14:54:51 +0300
Message-Id: <1498650906-12907-3-git-send-email-elena.reshetova@intel.com>
In-Reply-To: <1498650906-12907-1-git-send-email-elena.reshetova@intel.com>
References: <1498650906-12907-1-git-send-email-elena.reshetova@intel.com>
List-ID: netdev.vger.kernel.org

The refcount_t type and its corresponding API should be used instead of
atomic_t when the variable serves as a reference counter. This allows us
to avoid accidental refcounter overflows that might lead to
use-after-free situations.
Signed-off-by: Elena Reshetova
Signed-off-by: Hans Liljestrand
Signed-off-by: Kees Cook
Signed-off-by: David Windsor
---
 include/net/arp.h       |  2 +-
 include/net/ndisc.h     |  2 +-
 include/net/neighbour.h |  9 +++++----
 net/atm/clip.c          |  6 +++---
 net/core/neighbour.c    | 14 +++++++-------
 net/decnet/dn_neigh.c   |  2 +-
 6 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/include/net/arp.h b/include/net/arp.h
index 65619a2..17d90e4 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -28,7 +28,7 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
 
 	rcu_read_lock_bh();
 	n = __ipv4_neigh_lookup_noref(dev, key);
-	if (n && !atomic_inc_not_zero(&n->refcnt))
+	if (n && !refcount_inc_not_zero(&n->refcnt))
 		n = NULL;
 	rcu_read_unlock_bh();
 
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 1036c90..31b1bb1 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -384,7 +384,7 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons
 
 	rcu_read_lock_bh();
 	n = __ipv6_neigh_lookup_noref(dev, pkey);
-	if (n && !atomic_inc_not_zero(&n->refcnt))
+	if (n && !refcount_inc_not_zero(&n->refcnt))
 		n = NULL;
 	rcu_read_unlock_bh();
 
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index e4dd3a2..c5f6d51 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -17,6 +17,7 @@
  */
 
 #include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/rcupdate.h>
@@ -137,7 +138,7 @@ struct neighbour {
 	unsigned long		confirmed;
 	unsigned long		updated;
 	rwlock_t		lock;
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 	struct sk_buff_head	arp_queue;
 	unsigned int		arp_queue_len_bytes;
 	struct timer_list	timer;
@@ -409,18 +410,18 @@ static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
 
 static inline void neigh_release(struct neighbour *neigh)
 {
-	if (atomic_dec_and_test(&neigh->refcnt))
+	if (refcount_dec_and_test(&neigh->refcnt))
 		neigh_destroy(neigh);
 }
 
 static inline struct neighbour * neigh_clone(struct neighbour *neigh)
 {
 	if (neigh)
-		atomic_inc(&neigh->refcnt);
+		refcount_inc(&neigh->refcnt);
 	return neigh;
 }
 
-#define neigh_hold(n)	atomic_inc(&(n)->refcnt)
+#define neigh_hold(n)	refcount_inc(&(n)->refcnt)
 
 static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 {
diff --git a/net/atm/clip.c b/net/atm/clip.c
index ec527b6..68435f6 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -137,11 +137,11 @@ static int neigh_check_cb(struct neighbour *n)
 	if (entry->vccs || time_before(jiffies, entry->expires))
 		return 0;
 
-	if (atomic_read(&n->refcnt) > 1) {
+	if (refcount_read(&n->refcnt) > 1) {
 		struct sk_buff *skb;
 
 		pr_debug("destruction postponed with ref %d\n",
-			 atomic_read(&n->refcnt));
+			 refcount_read(&n->refcnt));
 
 		while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
 			dev_kfree_skb(skb);
@@ -767,7 +767,7 @@ static void atmarp_info(struct seq_file *seq, struct neighbour *n,
 			seq_printf(seq, "(resolving)\n");
 		else
 			seq_printf(seq, "(expired, ref %d)\n",
-				   atomic_read(&entry->neigh->refcnt));
+				   refcount_read(&entry->neigh->refcnt));
 	} else if (!svc) {
 		seq_printf(seq, "%d.%d.%d\n",
 			   clip_vcc->vcc->dev->number,
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d274f81..bc52190 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -141,7 +141,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 			 * - it is not permanent
 			 */
 			write_lock(&n->lock);
-			if (atomic_read(&n->refcnt) == 1 &&
+			if (refcount_read(&n->refcnt) == 1 &&
 			    !(n->nud_state & NUD_PERMANENT)) {
 				rcu_assign_pointer(*np,
 					rcu_dereference_protected(n->next,
@@ -219,7 +219,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
 			neigh_del_timer(n);
 			n->dead = 1;
 
-			if (atomic_read(&n->refcnt) != 1) {
+			if (refcount_read(&n->refcnt) != 1) {
 				/* The most unpleasant situation.
 				   We must destroy neighbour entry,
 				   but someone still uses it.
@@ -300,7 +300,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
 
 	NEIGH_CACHE_STAT_INC(tbl, allocs);
 	n->tbl		  = tbl;
-	atomic_set(&n->refcnt, 1);
+	refcount_set(&n->refcnt, 1);
 	n->dead		  = 1;
 out:
 	return n;
@@ -409,7 +409,7 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 	rcu_read_lock_bh();
 	n = __neigh_lookup_noref(tbl, pkey, dev);
 	if (n) {
-		if (!atomic_inc_not_zero(&n->refcnt))
+		if (!refcount_inc_not_zero(&n->refcnt))
 			n = NULL;
 		NEIGH_CACHE_STAT_INC(tbl, hits);
 	}
@@ -438,7 +438,7 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 	     n = rcu_dereference_bh(n->next)) {
 		if (!memcmp(n->primary_key, pkey, key_len) &&
 		    net_eq(dev_net(n->dev), net)) {
-			if (!atomic_inc_not_zero(&n->refcnt))
+			if (!refcount_inc_not_zero(&n->refcnt))
 				n = NULL;
 			NEIGH_CACHE_STAT_INC(tbl, hits);
 			break;
@@ -786,7 +786,7 @@ static void neigh_periodic_work(struct work_struct *work)
 			if (time_before(n->used, n->confirmed))
 				n->used = n->confirmed;
 
-			if (atomic_read(&n->refcnt) == 1 &&
+			if (refcount_read(&n->refcnt) == 1 &&
 			    (state == NUD_FAILED ||
 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
 				*np = n->next;
@@ -2196,7 +2196,7 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
-	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
+	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
 	read_unlock_bh(&neigh->lock);
 
 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index eeb5fc5..21dedf6 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -559,7 +559,7 @@ static inline void dn_neigh_format_entry(struct seq_file *seq,
 		   (dn->flags&DN_NDFLAG_R2) ? "2" : "-",
 		   (dn->flags&DN_NDFLAG_P3) ? "3" : "-",
 		   dn->n.nud_state,
-		   atomic_read(&dn->n.refcnt),
+		   refcount_read(&dn->n.refcnt),
 		   dn->blksize,
 		   (dn->n.dev) ? dn->n.dev->name : "?");
 	read_unlock(&n->lock);
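
For readers new to the refcount_t API, the userspace sketch below illustrates
the semantics this conversion relies on: an increment that refuses to take a
reference once the count has already dropped to zero, and a counter that
saturates instead of wrapping on overflow. This is only an illustration built
on C11 atomics; the sketch_* names and the saturation value are invented for
the example, and the kernel's real implementation is the refcount_* API
declared in <linux/refcount.h>.

/*
 * Rough userspace sketch of saturating reference-count semantics.
 * Not the kernel implementation; names and constants are illustrative.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_REFCOUNT_SATURATED UINT_MAX  /* pin the counter here on overflow */

struct sketch_refcount {
	atomic_uint refs;
};

/* Take a reference only if the object is still live (refs != 0). */
static bool sketch_ref_inc_not_zero(struct sketch_refcount *r)
{
	unsigned int old = atomic_load(&r->refs);

	do {
		if (old == 0)
			return false;   /* object is already on its way to being freed */
		if (old == SKETCH_REFCOUNT_SATURATED)
			return true;    /* saturated: leak the object rather than wrap */
	} while (!atomic_compare_exchange_weak(&r->refs, &old, old + 1));

	return true;
}

/* Drop a reference; returns true when the caller must free the object. */
static bool sketch_ref_dec_and_test(struct sketch_refcount *r)
{
	unsigned int old = atomic_load(&r->refs);

	do {
		if (old == SKETCH_REFCOUNT_SATURATED)
			return false;   /* saturated objects are never freed */
	} while (!atomic_compare_exchange_weak(&r->refs, &old, old - 1));

	return old == 1;
}

int main(void)
{
	struct sketch_refcount rc = { .refs = 1 };

	if (sketch_ref_inc_not_zero(&rc))
		printf("got a reference, refs=%u\n", atomic_load(&rc.refs));
	if (sketch_ref_dec_and_test(&rc))
		printf("unexpected free\n");
	if (sketch_ref_dec_and_test(&rc))
		printf("last reference dropped, free the object\n");
	return 0;
}

With these semantics an over-incremented counter degrades into a memory leak
rather than a wrap-around followed by a premature free, which is the
use-after-free class the commit message refers to.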