[linux-yocto] [PATCH 4.18-rt] of: phandle_cache merge fixup for preempt-rt

Paul Gortmaker paul.gortmaker at windriver.com
Thu Mar 21 12:11:03 PDT 2019


In the yocto merge commit 5cc3fddf0e2a ("Merge branch 'v4.18/standard/base' into
v4.18/standard/preempt-rt/base") there was a conflict between the
backport of upstream b8a9ac1a5b99 ("of: of_node_get()/of_node_put() nodes
held in phandle cache") and the rt-specific commit b75caf08b721 ("of:
allocate / free phandle cache outside of the devtree_lock").

Unfortunately, the conflict resolution left the 'shadow' variable introduced
by the rt patch undeclared, resulting in:

drivers/of/base.c:120:2: error: 'shadow' undeclared (first use in this function)
  shadow = phandle_cache;
  ^~~~~~

drivers/of/base.c: In function 'of_populate_phandle_cache':
drivers/of/base.c:182:7: warning: 'shadow' is used uninitialized in this function [-Wuninitialized]

This merge fix-up aligns the impacted code with what is currently found in
4.19-rt (as of v4.19.15-rt12); a condensed sketch of the resulting code
shape is included after the sign-off below, ahead of the diff.

Reported-by: Yue Tao <Yue.Tao at windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker at windriver.com>
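
For reference, the free path ends up shaped roughly as below. This is a
condensed sketch assembled from the hunks that follow and the 4.19-rt
resolution, not the literal file contents. The point of the 'shadow'
pointer is that __of_free_phandle_cache() only detaches the cache array
while devtree_lock (a raw spinlock) is held, and the callers do the
kfree() of the old array after the lock has been dropped, which is what
the -rt patch is about:

static struct device_node **__of_free_phandle_cache(void)
{
	u32 cache_entries = phandle_cache_mask + 1;
	struct device_node **shadow;
	u32 k;

	if (!phandle_cache)
		return NULL;

	/* drop the node references while still under devtree_lock */
	for (k = 0; k < cache_entries; k++)
		of_node_put(phandle_cache[k]);

	/* detach the array; the caller frees it once the lock is dropped */
	shadow = phandle_cache;
	phandle_cache = NULL;
	return shadow;
}

int of_free_phandle_cache(void)
{
	struct device_node **shadow;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	shadow = __of_free_phandle_cache();
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	kfree(shadow);	/* kfree() happens outside the raw spinlock */
	return 0;
}

of_populate_phandle_cache() follows the same pattern, with the unlock and
kfree(shadow) reached through the 'out:' label in the second hunk.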

diff --git a/drivers/of/base.c b/drivers/of/base.c
index 904c7686f9d7..4a33ef482bcc 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -106,31 +106,34 @@ static u32 phandle_cache_mask;
 /*
  * Caller must hold devtree_lock.
  */
-static void __of_free_phandle_cache(void)
+static struct device_node** __of_free_phandle_cache(void)
 {
 	u32 cache_entries = phandle_cache_mask + 1;
 	u32 k;
+	struct device_node **shadow;
 
 	if (!phandle_cache)
-		return;
+		return NULL;
 
 	for (k = 0; k < cache_entries; k++)
 		of_node_put(phandle_cache[k]);
 
 	shadow = phandle_cache;
 	phandle_cache = NULL;
+	return shadow;
 }
 
 int of_free_phandle_cache(void)
 {
 	unsigned long flags;
+	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	__of_free_phandle_cache();
+	shadow = __of_free_phandle_cache();
 
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
+	kfree(shadow);
 	return 0;
 }
 #if !defined(CONFIG_MODULES)
@@ -169,27 +172,24 @@ void of_populate_phandle_cache(void)
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	__of_free_phandle_cache();
+	shadow = __of_free_phandle_cache();
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
 			phandles++;
 
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-	kfree(shadow);
-
 	if (!phandles)
-		return;
+		goto out;
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
 	cache_entries = roundup_pow_of_two(phandles);
 	phandle_cache_mask = cache_entries - 1;
 
-	shadow = kcalloc(cache_entries, sizeof(*phandle_cache), GFP_KERNEL);
-
-	if (!shadow)
-		return;
+	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
+				GFP_ATOMIC);
 	raw_spin_lock_irqsave(&devtree_lock, flags);
-	phandle_cache = shadow;
+	if (!phandle_cache)
+		goto out;
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
@@ -197,7 +197,9 @@ void of_populate_phandle_cache(void)
 			phandle_cache[np->phandle & phandle_cache_mask] = np;
 		}
 
+out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	kfree(shadow);
 }
 
 void __init of_core_init(void)
-- 
2.7.4


