
[1/2] of: of_node_get()/of_node_put() nodes held in phandle cache

Message ID 1544769771-5468-2-git-send-email-frowand.list@gmail.com
State Superseded, archived
Series of: phandle_cache, fix refcounts, remove stale entry

Checks

Context Check Description
robh/checkpatch success

Commit Message

Frank Rowand Dec. 14, 2018, 6:42 a.m. UTC
From: Frank Rowand <frank.rowand@sony.com>

The phandle cache contains struct device_node pointers.  The refcount
of the pointers was not incremented while in the cache, allowing a
use-after-free error after kfree() of the node.  Add the proper
increment and decrement of the refcount.

Fixes: 0b3ce78e90fc ("of: cache phandle nodes to reduce cost of of_find_node_by_phandle()")

Signed-off-by: Frank Rowand <frank.rowand@sony.com>
---

do not "cc: stable", unless the following commits are also in stable:

  commit e54192b48da7 ("of: fix phandle cache creation for DTs with no phandles")
  commit b9952b5218ad ("of: overlay: update phandle cache on overlay apply and remove")
  commit 0b3ce78e90fc ("of: cache phandle nodes to reduce cost of of_find_node_by_phandle()")

 drivers/of/base.c | 70 ++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 46 insertions(+), 24 deletions(-)
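For reference, the refcounting pattern being added, as a minimal sketch
(locking and the OF_PHANDLE_ILLEGAL handling are omitted; see the full
diff below):

  static struct device_node **phandle_cache;
  static u32 phandle_cache_mask;

  /* take a reference for as long as the pointer lives in the cache */
  static void cache_store(struct device_node *np)
  {
  	of_node_get(np);
  	phandle_cache[np->phandle & phandle_cache_mask] = np;
  }

  /* drop the references when the cache is torn down or rebuilt */
  static void cache_drop_all(void)
  {
  	u32 cache_entries = phandle_cache_mask + 1;
  	u32 k;

  	for (k = 0; k < cache_entries; k++)
  		of_node_put(phandle_cache[k]);	/* of_node_put(NULL) is a no-op */
  }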

Comments

Rob Herring Dec. 14, 2018, 5:15 p.m. UTC | #1
On Fri, Dec 14, 2018 at 12:43 AM <frowand.list@gmail.com> wrote:
>
> From: Frank Rowand <frank.rowand@sony.com>
>
> The phandle cache contains struct device_node pointers.  The refcount
> of the pointers was not incremented while in the cache, allowing use
> after free error after kfree() of the node.  Add the proper increment
> and decrement of the use count.

Since we pre-populate the cache at boot, all the nodes will have a ref
count and will never be freed unless we happen to repopulate the whole
cache. That doesn't seem ideal. The node pointer is not "in use" just
because it is in the cache.

Rob
Frank Rowand Dec. 14, 2018, 10:47 p.m. UTC | #2
On 12/14/18 9:15 AM, Rob Herring wrote:
> On Fri, Dec 14, 2018 at 12:43 AM <frowand.list@gmail.com> wrote:
>>
>> From: Frank Rowand <frank.rowand@sony.com>
>>
>> The phandle cache contains struct device_node pointers.  The refcount
>> of the pointers was not incremented while in the cache, allowing use
>> after free error after kfree() of the node.  Add the proper increment
>> and decrement of the use count.
> 
> Since we pre-populate the cache at boot, all the nodes will have a ref
> count and will never be freed unless we happen to repopulate the whole
> cache. That doesn't seem ideal. The node pointer is not "in use" just
> because it is in the cache.
> 
> Rob
> 

This patch also adds of_node_put() so that the refcount will go to zero
when the node is removed as part of an overlay remove, if the node was
added by an overlay.

Patch 2/2 adds the free cache entry call to __of_detach_node(), so the
refcount will go to zero when the node is removed for dynamic use cases
other than overlays.  (For overlays, all nodes are instead removed from
the cache before __of_detach_node() is called.)
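Something along these lines for the per-entry case (a rough sketch only,
not the actual patch 2/2 code; the helper name and its call site in
__of_detach_node() are illustrative, and the real version runs under
devtree_lock):

  /* drop a single node from the cache when it is detached */
  static void __of_free_phandle_cache_entry(phandle handle)
  {
  	struct device_node *np;

  	if (!phandle_cache || !handle)
  		return;

  	np = phandle_cache[handle & phandle_cache_mask];
  	if (np && np->phandle == handle) {
  		of_node_put(np);	/* balances the of_node_get() taken when cached */
  		phandle_cache[handle & phandle_cache_mask] = NULL;
  	}
  }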

-Frank
Frank Rowand Dec. 14, 2018, 11:04 p.m. UTC | #3
On 12/14/18 2:47 PM, Frank Rowand wrote:
> On 12/14/18 9:15 AM, Rob Herring wrote:
>> On Fri, Dec 14, 2018 at 12:43 AM <frowand.list@gmail.com> wrote:
>>>
>>> From: Frank Rowand <frank.rowand@sony.com>
>>>
>>> The phandle cache contains struct device_node pointers.  The refcount
>>> of the pointers was not incremented while in the cache, allowing use
>>> after free error after kfree() of the node.  Add the proper increment
>>> and decrement of the use count.
>>
>> Since we pre-populate the cache at boot, all the nodes will have a ref
>> count and will never be freed unless we happen to repopulate the whole

>> cache. That doesn't seem ideal. The node pointer is not "in use" just
>> because it is in the cache.

I forgot to reply to this sentence.

The node pointers are "in use" because of_find_node_by_phandle() will
use the pointers to access the phandle field.  This is a use-after-free
bug if the node has been kfree()'ed.
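
Roughly sketched (simplified from of_find_node_by_phandle(), not
verbatim):

  masked_handle = handle & phandle_cache_mask;

  /* if the node was kfree()'ed while still cached, reading ->phandle
   * below is the use after free
   */
  if (phandle_cache &&
      phandle_cache[masked_handle] &&
      phandle_cache[masked_handle]->phandle == handle)
  	np = phandle_cache[masked_handle];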


>>
>> Rob
>>
> 
> This patch also adds of_node_put() so that the refcount will go to zero
> when the node is removed as part of an overlay remove, if the node was
> added by an overlay.
> 
> Patch 2/2 adds the free cache entry call to __of_detach_node(), so the
> refcount will go to zero when the node is removed for dynamic use cases
> other than overlays.  (For overlays, all nodes are instead removed from
> the cache before __of_detach_node() is called.)
> 
> -Frank
>

Patch

diff --git a/drivers/of/base.c b/drivers/of/base.c
index 09692c9b32a7..d599367cb92a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -116,9 +116,6 @@  int __weak of_node_to_nid(struct device_node *np)
 }
 #endif
 
-static struct device_node **phandle_cache;
-static u32 phandle_cache_mask;
-
 /*
  * Assumptions behind phandle_cache implementation:
  *   - phandle property values are in a contiguous range of 1..n
@@ -127,6 +124,44 @@  int __weak of_node_to_nid(struct device_node *np)
  *   - the phandle lookup overhead reduction provided by the cache
  *     will likely be less
  */
+
+static struct device_node **phandle_cache;
+static u32 phandle_cache_mask;
+
+/*
+ * Caller must hold devtree_lock.
+ */
+void __of_free_phandle_cache(void)
+{
+	u32 cache_entries = phandle_cache_mask + 1;
+	u32 k;
+
+	if (!phandle_cache)
+		return;
+
+	for (k = 0; k < cache_entries; k++)
+		of_node_put(phandle_cache[k]);
+
+	kfree(phandle_cache);
+	phandle_cache = NULL;
+}
+
+int of_free_phandle_cache(void)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+
+	__of_free_phandle_cache();
+
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+	return 0;
+}
+#if !defined(CONFIG_MODULES)
+late_initcall_sync(of_free_phandle_cache);
+#endif
+
 void of_populate_phandle_cache(void)
 {
 	unsigned long flags;
@@ -136,8 +171,7 @@  void of_populate_phandle_cache(void)
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	kfree(phandle_cache);
-	phandle_cache = NULL;
+	__of_free_phandle_cache();
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
@@ -155,30 +189,15 @@  void of_populate_phandle_cache(void)
 		goto out;
 
 	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
+		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
+			of_node_get(np);
 			phandle_cache[np->phandle & phandle_cache_mask] = np;
+		}
 
 out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 
-int of_free_phandle_cache(void)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	kfree(phandle_cache);
-	phandle_cache = NULL;
-
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
-	return 0;
-}
-#if !defined(CONFIG_MODULES)
-late_initcall_sync(of_free_phandle_cache);
-#endif
-
 void __init of_core_init(void)
 {
 	struct device_node *np;
@@ -1195,8 +1214,11 @@  struct device_node *of_find_node_by_phandle(phandle handle)
 	if (!np) {
 		for_each_of_allnodes(np)
 			if (np->phandle == handle) {
-				if (phandle_cache)
+				if (phandle_cache) {
+					/* will put when removed from cache */
+					of_node_get(np);
 					phandle_cache[masked_handle] = np;
+				}
 				break;
 			}
 	}