[linux-yocto] [PATCH 34/42] rionet: Fix the corruption of tx_cnt during Transmit.

Cristian Bercaru cristian.bercaru at windriver.com
Thu Jun 11 01:32:19 PDT 2015


From: Palani <palaniappan.ramanathan at intel.com>

The non-atomic variable "tx_cnt" can be updated concurrently from
two different contexts: the "rionet_start_xmit" (Tx) path and the
"rionet_outb_msg_event" (Tx completion callback) path. These
functions take different locks: "tx_lock" on the Tx path and "lock"
in the callback, so neither serializes the other's read-modify-write
of tx_cnt. The resulting corruption of tx_cnt leads to a kernel
crash, most visible during high-throughput data transfers.

Fix this by taking the same lock, "tx_lock", in both paths so that
all updates to tx_cnt are serialized (a sketch of the race follows
below).
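
To make the race concrete, below is a minimal userspace sketch (not
code from the driver): pthread mutexes stand in for the kernel
spinlocks, and tx_path()/completion_path() are hypothetical analogues
of rionet_start_xmit() and rionet_outb_msg_event(). Because the two
paths take different locks, the non-atomic updates of tx_cnt can
interleave and lose counts; pointing both paths at tx_lock, as this
patch does, serializes them.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock    = PTHREAD_MUTEX_INITIALIZER; /* old, wrong lock */
static int tx_cnt;

/* rionet_start_xmit() analogue: queue a packet, bump tx_cnt. */
static void tx_path(void)
{
	pthread_mutex_lock(&tx_lock);
	tx_cnt++;			/* non-atomic read-modify-write */
	pthread_mutex_unlock(&tx_lock);
}

/* rionet_outb_msg_event() analogue: completion callback, drop tx_cnt.
 * Before the patch it takes "lock", not "tx_lock", so its update is
 * not serialized against the Tx path's update. */
static void completion_path(void)
{
	pthread_mutex_lock(&lock);	/* the fix: take &tx_lock here */
	tx_cnt--;
	pthread_mutex_unlock(&lock);
}

static void *tx_thread(void *unused)
{
	(void)unused;
	for (int i = 0; i < 1000000; i++)
		tx_path();
	return NULL;
}

static void *completion_thread(void *unused)
{
	(void)unused;
	for (int i = 0; i < 1000000; i++)
		completion_path();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, tx_thread, NULL);
	pthread_create(&b, NULL, completion_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With both paths on tx_lock this always prints 0; with the
	 * mismatched locks, lost updates typically leave it nonzero. */
	printf("tx_cnt = %d\n", tx_cnt);
	return 0;
}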

Signed-off-by: Palani <palaniappan.ramanathan at lsi.com>
---
 drivers/net/rionet.c |  130 +++++++++++++++++++++++++-------------------------
 1 file changed, 65 insertions(+), 65 deletions(-)

diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 6d1f6ed..05c2832 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -101,7 +101,8 @@ static int rionet_rx_clean(struct net_device *ndev)
 		if (!rnet->rx_skb[i])
 			continue;
 
-		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
+		data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX);
+		if (!data)
 			break;
 
 		rnet->rx_skb[i]->data = data;
@@ -159,8 +160,7 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
 	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
 
 	if (netif_msg_tx_queued(rnet))
-		printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
-		       skb->len);
+		pr_info("%s: queued skb len %8.8x\n", DRV_NAME, skb->len);
 
 	return 0;
 }
@@ -186,7 +186,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
 		netif_stop_queue(ndev);
 		spin_unlock_irqrestore(&rnet->tx_lock, flags);
-		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
+		pr_err("%s: BUG! Tx Ring full when queue awake!\n",
 		       ndev->name);
 		return NETDEV_TX_BUSY;
 	}
@@ -197,20 +197,17 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
 				i++)
 			if (nets[rnet->mport->id].active[i]) {
-				rionet_queue_tx_msg(skb, ndev,
-					nets[rnet->mport->id].active[i]);
+				rionet_queue_tx_msg(skb, ndev, nets[rnet->mport->id].active[i]);
 				if (count)
 					atomic_inc(&skb->users);
 				count++;
 			}
 	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
 		destid = RIONET_GET_DESTID(eth->h_dest);
-		if (nets[rnet->mport->id].active[destid])
-			rionet_queue_tx_msg(skb, ndev,
-					nets[rnet->mport->id].active[destid]);
-		else {
-			/*
-			 * If the target device was removed from the list of
+		if (nets[rnet->mport->id].active[destid]) {
+			rionet_queue_tx_msg(skb, ndev, nets[rnet->mport->id].active[destid]);
+		} else {
+			/* If the target device was removed from the list of
 			 * active peers but we still have TX packets targeting
 			 * it just report sending a packet to the target
 			 * (without actual packet transfer).
@@ -234,12 +231,12 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
 	struct rionet_peer *peer;
 
 	if (netif_msg_intr(rnet))
-		printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
-		       DRV_NAME, sid, tid, info);
+		pr_info(
+				"%s: doorbell sid %4.4x tid %4.4x info %4.4x",
+				DRV_NAME, sid, tid, info);
 	if (info == RIONET_DOORBELL_JOIN) {
 		if (!nets[rnet->mport->id].active[sid]) {
-			list_for_each_entry(peer,
-					   &nets[rnet->mport->id].peers, node) {
+			list_for_each_entry(peer, &nets[rnet->mport->id].peers, node) {
 				if (peer->rdev->destid == sid) {
 					nets[rnet->mport->id].active[sid] =
 								peer->rdev;
@@ -254,8 +251,7 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
 		nets[rnet->mport->id].nact--;
 	} else {
 		if (netif_msg_intr(rnet))
-			printk(KERN_WARNING "%s: unhandled doorbell\n",
-			       DRV_NAME);
+			pr_warn("%s: unhandled doorbell\n", DRV_NAME);
 	}
 }
 
@@ -266,11 +262,13 @@ static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox
 	struct rionet_private *rnet = netdev_priv(ndev);
 
 	if (netif_msg_intr(rnet))
-		printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
+		pr_info(
+				"%s: inbound message event, mbox %d slot %d\n",
 		       DRV_NAME, mbox, slot);
 
 	spin_lock(&rnet->lock);
-	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
+	n = rionet_rx_clean(ndev);
+	if (n != rnet->rx_slot)
 		rionet_rx_fill(ndev, n);
 	spin_unlock(&rnet->lock);
 }
@@ -280,10 +278,10 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
 	struct net_device *ndev = dev_id;
 	struct rionet_private *rnet = netdev_priv(ndev);
 
-	spin_lock(&rnet->lock);
+	spin_lock(&rnet->tx_lock);
 
 	if (netif_msg_intr(rnet))
-		printk(KERN_INFO
+		pr_info(
 		       "%s: outbound message event, mbox %d slot %d\n",
 		       DRV_NAME, mbox, slot);
 
@@ -299,7 +297,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
 	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
 		netif_wake_queue(ndev);
 
-	spin_unlock(&rnet->lock);
+	spin_unlock(&rnet->tx_lock);
 }
 
 static int rionet_open(struct net_device *ndev)
@@ -309,27 +307,31 @@ static int rionet_open(struct net_device *ndev)
 	struct rionet_private *rnet = netdev_priv(ndev);
 
 	if (netif_msg_ifup(rnet))
-		printk(KERN_INFO "%s: open\n", DRV_NAME);
-
-	if ((rc = rio_request_inb_dbell(rnet->mport,
-					(void *)ndev,
-					RIONET_DOORBELL_JOIN,
-					RIONET_DOORBELL_LEAVE,
-					rionet_dbell_event)) < 0)
+		pr_info("%s: open\n", DRV_NAME);
+
+	rc = rio_request_inb_dbell(
+			rnet->mport,
+			(void *)ndev,
+			RIONET_DOORBELL_JOIN,
+			RIONET_DOORBELL_LEAVE,
+			rionet_dbell_event);
+	if (rc < 0)
 		goto out;
 
-	if ((rc = rio_request_inb_mbox(rnet->mport,
-				       (void *)ndev,
-				       RIONET_MAILBOX,
-				       RIONET_RX_RING_SIZE,
-				       rionet_inb_msg_event)) < 0)
+	rc = rio_request_inb_mbox(
+			rnet->mport,
+			(void *)ndev,
+			RIONET_MAILBOX,
+			RIONET_RX_RING_SIZE,
+			rionet_inb_msg_event);
+	if (rc < 0)
 		goto out;
 
-	if ((rc = rio_request_outb_mbox(rnet->mport,
-					(void *)ndev,
-					RIONET_MAILBOX,
-					RIONET_TX_RING_SIZE,
-					rionet_outb_msg_event)) < 0)
+	rc = rio_request_outb_mbox(
+			rnet->mport, (void *)ndev, RIONET_MAILBOX,
+			RIONET_TX_RING_SIZE,
+			rionet_outb_msg_event);
+	if (rc < 0)
 		goto out;
 
 	/* Initialize inbound message ring */
@@ -347,11 +349,10 @@ static int rionet_open(struct net_device *ndev)
 
 	list_for_each_entry_safe(peer, tmp,
 				 &nets[rnet->mport->id].peers, node) {
-		if (!(peer->res = rio_request_outb_dbell(peer->rdev,
-							 RIONET_DOORBELL_JOIN,
-							 RIONET_DOORBELL_LEAVE)))
-		{
-			printk(KERN_ERR "%s: error requesting doorbells\n",
+		peer->res = rio_request_outb_dbell(peer->rdev, RIONET_DOORBELL_JOIN,
+				 RIONET_DOORBELL_LEAVE);
+		if (!peer->res) {
+			pr_err("%s: error requesting doorbells\n",
 			       DRV_NAME);
 			continue;
 		}
@@ -360,7 +361,7 @@ static int rionet_open(struct net_device *ndev)
 		rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
 	}
 
-      out:
+out:
 	return rc;
 }
 
@@ -371,7 +372,7 @@ static int rionet_close(struct net_device *ndev)
 	int i;
 
 	if (netif_msg_ifup(rnet))
-		printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name);
+		pr_info("%s: close %s\n", DRV_NAME, ndev->name);
 
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
@@ -504,7 +505,8 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
 	if (rc != 0)
 		goto out;
 
-	printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
+	pr_info(
+			"%s: %s %s Version %s, MAC %pM, %s\n",
 	       ndev->name,
 	       DRV_NAME,
 	       DRV_DESC,
@@ -512,7 +514,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
 	       ndev->dev_addr,
 	       mport->name);
 
-      out:
+out:
 	return rc;
 }
 
@@ -533,8 +535,7 @@ static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
 
 	oldnet = test_and_set_bit(netid, net_table);
 
-	/*
-	 * If first time through this net, make sure local device is rionet
+	/* If first time through this net, make sure local device is rionet
 	 * capable and setup netdev (this step will be skipped in later probes
 	 * on the same net).
 	 */
@@ -544,37 +545,37 @@ static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
 		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
 					 &ldst_ops);
 		if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
-			printk(KERN_ERR
-			       "%s: local device %s is not network capable\n",
-			       DRV_NAME, rdev->net->hport->name);
+			pr_err("%s: local device %s is not network capable\n", DRV_NAME, rdev->net->hport->name);
 			goto out;
 		}
 
 		/* Allocate our net_device structure */
 		ndev = alloc_etherdev(sizeof(struct rionet_private));
-		if (ndev == NULL) {
+		if (!ndev) {
 			rc = -ENOMEM;
 			goto out;
 		}
 		nets[netid].ndev = ndev;
 		rc = rionet_setup_netdev(rdev->net->hport, ndev);
 		if (rc) {
-			printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
+			pr_err("%s: failed to setup netdev (rc=%d)\n",
 			       DRV_NAME, rc);
 			goto out;
 		}
 
 		INIT_LIST_HEAD(&nets[netid].peers);
 		nets[netid].nact = 0;
-	} else if (nets[netid].ndev == NULL)
-		goto out;
+	} else {
+		if (!nets[netid].ndev)
+			goto out;
+	}
 
-	/*
-	 * If the remote device has mailbox/doorbell capabilities,
+	/* If the remote device has mailbox/doorbell capabilities,
 	 * add it to the peer list.
 	 */
 	if (dev_rionet_capable(rdev)) {
-		if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
+		peer = kmalloc(sizeof(*peer), GFP_KERNEL);
+		if (!peer) {
 			rc = -ENOMEM;
 			goto out;
 		}
@@ -616,7 +617,7 @@ static void __exit rionet_exit(void)
 	int i;
 
 	for (i = 0; i < RIONET_MAX_NETS; i++) {
-		if (nets[i].ndev != NULL) {
+		if (nets[i].ndev) {
 			ndev = nets[i].ndev;
 			rnet = netdev_priv(ndev);
 			unregister_netdev(ndev);
@@ -627,9 +628,8 @@ static void __exit rionet_exit(void)
 				kfree(peer);
 			}
 
-			free_pages((unsigned long)nets[i].active,
-				 get_order(sizeof(void *) *
-				 RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size)));
+			free_pages((unsigned long)nets[i].active, get_order(sizeof(void *) *
+					RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size)));
 			nets[i].active = NULL;
 
 			free_netdev(ndev);
-- 
1.7.9.5


