[linux-yocto] [PATCH 13/42] drivers/rapidio/devices: Changes to support the axxia BSP

Cristian Bercaru cristian.bercaru at windriver.com
Thu Jun 11 01:31:58 PDT 2015


From: Charlie Paul <cpaul.windriver at gmail.com>

Add RapidIO driver support for the Axxia AXM5500 (AXM55xx)
board.
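
The inbound message engines can run in interrupt or timer (polled)
mode, selected per port at boot time via the __setup() options added
in axxia-rio-irq.c; for example (values are illustrative):

    axm_srio_tmr_mode=1,1        select timer mode on both ports
    axm_srio_int_mode=1,1        select interrupt mode on both ports
    axm_srio_tmr_period=200000   hrtimer poll period, in nanoseconds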

Signed-off-by: Charlie Paul <cpaul.windriver at gmail.com>
---
 drivers/rapidio/devices/Kconfig               |    1 +
 drivers/rapidio/devices/Makefile              |    1 +
 drivers/rapidio/devices/lsi/Kconfig           |   47 +
 drivers/rapidio/devices/lsi/Makefile          |    5 +
 drivers/rapidio/devices/lsi/axxia-rio-irq.c   | 2736 +++++++++++++++++++++++++
 drivers/rapidio/devices/lsi/axxia-rio-irq.h   |  211 ++
 drivers/rapidio/devices/lsi/axxia-rio-sysfs.c |  287 +++
 drivers/rapidio/devices/lsi/axxia-rio.c       | 1777 ++++++++++++++++
 drivers/rapidio/devices/lsi/axxia-rio.h       |  599 ++++++
 include/linux/rio.h                           |    6 +
 10 files changed, 5670 insertions(+)
 create mode 100644 drivers/rapidio/devices/lsi/Kconfig
 create mode 100644 drivers/rapidio/devices/lsi/Makefile
 create mode 100644 drivers/rapidio/devices/lsi/axxia-rio-irq.c
 create mode 100644 drivers/rapidio/devices/lsi/axxia-rio-irq.h
 create mode 100644 drivers/rapidio/devices/lsi/axxia-rio-sysfs.c
 create mode 100644 drivers/rapidio/devices/lsi/axxia-rio.c
 create mode 100644 drivers/rapidio/devices/lsi/axxia-rio.h

diff --git a/drivers/rapidio/devices/Kconfig b/drivers/rapidio/devices/Kconfig
index dcc815d..a1f46bf 100644
--- a/drivers/rapidio/devices/Kconfig
+++ b/drivers/rapidio/devices/Kconfig
@@ -16,3 +16,4 @@ config RAPIDIO_CN6XXX
 	  Includes support for Cavium Networks CN6XXX family of serial RapidIO
 	  devices.
 
+source "drivers/rapidio/devices/lsi/Kconfig"
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
index 565ebfb..d8d9d90 100644
--- a/drivers/rapidio/devices/Makefile
+++ b/drivers/rapidio/devices/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_RAPIDIO_TSI721)	+= tsi721_mport.o
 tsi721_mport-y			:= tsi721.o
 tsi721_mport-$(CONFIG_RAPIDIO_DMA_ENGINE) += tsi721_dma.o
 obj-$(CONFIG_RAPIDIO_CN6XXX)    += cn6xxx.o
+obj-$(CONFIG_AXXIA_RIO)         += lsi/
\ No newline at end of file
diff --git a/drivers/rapidio/devices/lsi/Kconfig b/drivers/rapidio/devices/lsi/Kconfig
new file mode 100644
index 0000000..c31e2a1
--- /dev/null
+++ b/drivers/rapidio/devices/lsi/Kconfig
@@ -0,0 +1,47 @@
+
+config AXXIA_RIO
+       bool "LSI Embedded SRIO Controller support"
+       depends on RAPIDIO && (ACP || ARCH_AXXIA)
+       default n
+       ---help---
+         Include support for the RapidIO controllers on LSI Axxia
+         devices. Both the ARM-based (AXM55xx) and PowerPC-based
+         (AXM35xx) Axxia devices have serial RapidIO controllers.
+
+config RIO_MAINT_WIN_SIZE
+       hex "RIO mport maintenance window size"
+       depends on RAPIDIO
+       default "0x400000"
+       ---help---
+         Size of RAPIDIO maintenance transaction window.
+         If RapidIO LAW size in your platform is less than 0x400000,
+         you may use this option to adjust the maintenance transaction
+         window accordingly.
+
+config AXXIA_RIO_STAT
+       bool "AXXIA RIO driver statistics"
+       depends on AXXIA_RIO
+       default n
+       ---help---
+         If you say Y here, you will be able to measure and view
+         state and interrupt counters for the Axxia RapidIO
+         controller via sysfs. It provides counters for mbox
+         messages. May add a slight CPU overhead.
+
+config AXXIA_RIO_16B_ID
+       bool "RapidIO large common transport system"
+       depends on AXXIA_RIO && (ACP && PPC)
+       default n
+       ---help---
+         Say Y here to enable the use of 16-bit transport IDs;
+         otherwise the transport ID is 8 bits. This is required only
+         for the older Axxia devices (34xx), not for the newer
+         AXM55xx devices.
+
+config OB_DME_ENTRY_SIZE
+       int "Number of Descriptors per DME for Static allocation"
+       depends on AXXIA_RIO
+       default "4096"
+       ---help---
+         Number of descriptors to allocate for each outbound DME.
diff --git a/drivers/rapidio/devices/lsi/Makefile b/drivers/rapidio/devices/lsi/Makefile
new file mode 100644
index 0000000..82d0236
--- /dev/null
+++ b/drivers/rapidio/devices/lsi/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux kernel.
+#
+obj-$(CONFIG_AXXIA_RIO)			+= axxia-rio.o axxia-rio-irq.o
+obj-$(CONFIG_AXXIA_RIO_STAT)		+= axxia-rio-sysfs.o
diff --git a/drivers/rapidio/devices/lsi/axxia-rio-irq.c b/drivers/rapidio/devices/lsi/axxia-rio-irq.c
new file mode 100644
index 0000000..97efaa8
--- /dev/null
+++ b/drivers/rapidio/devices/lsi/axxia-rio-irq.c
@@ -0,0 +1,2736 @@
+/*
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/of_platform.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/dmapool.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+
+#include "axxia-rio.h"
+unsigned int axxia_dme_tmr_mode[2] = { AXXIA_IBDME_INTERRUPT_MODE,
+					AXXIA_IBDME_TIMER_MODE };
+static int axxia_timer_mode_setup(char *str)
+{
+	unsigned int tmr_mode[3];
+	int i;
+	(void)get_options(str, ARRAY_SIZE(tmr_mode), tmr_mode);
+	for (i = 0; i < tmr_mode[0]; i++) {
+		if (tmr_mode[i+1] > 1)
+			pr_debug("Invalid parameter value for Timer Mode\n");
+		else
+			axxia_dme_tmr_mode[i] = AXXIA_IBDME_TIMER_MODE;
+	}
+	return 1;
+}
+__setup("axm_srio_tmr_mode=", axxia_timer_mode_setup);
+
+static int axxia_int_mode_setup(char *str)
+{
+	unsigned int int_mode[3];
+	int i;
+	(void)get_options(str, ARRAY_SIZE(int_mode), int_mode);
+	for (i = 0; i < int_mode[0]; i++) {
+		if (int_mode[i+1] > 1)
+			pr_debug("Invalid param value for Interrupt Mode\n");
+		else
+			axxia_dme_tmr_mode[i] = AXXIA_IBDME_INTERRUPT_MODE;
+	}
+	return 1;
+}
+__setup("axm_srio_int_mode=", axxia_int_mode_setup);
+
+#define AXXIA_HRTIMER_DELAY	(200 * 1000UL)
+unsigned int axxia_hrtimer_delay = AXXIA_HRTIMER_DELAY;
+static int __init axxia_hrtimer_setup(char *str)
+{
+	get_option(&str, &axxia_hrtimer_delay);
+	return 1;
+}
+__setup("axm_srio_tmr_period=", axxia_hrtimer_setup);
+
+/**************************sRIO SERDES *****************************/
+u32 srio_serdes_write32(struct rio_priv *priv, u32 addr, u32 val)
+{
+	void __iomem *regaddr;
+	u32 regval = 0;
+	regaddr = (priv->linkdown_reset.win) +
+			APB2SER_SRIO_PHY0_CFG_OFFSET;
+	iowrite32(val, (regaddr + SERDES_CMD0_OFFSET));
+	regval = ((1<<SERDES_CMD1_VALID_SHIFT) |
+			(0x1 << SERDES_CMD1_HWRITE_SHIFT) |
+			(0x01<<SERDES_CMD1_TSHIFT_SHIFT) |
+			(0x2<<SERDES_CMD1_HSZIE_SHIFT) |
+			(0x2 << SERDES_CMD1_HTRANS_SHIFT) |
+			(addr & SERDES_CMD1_HADDR_MASK));
+	iowrite32(regval, (regaddr + SERDES_CMD1_OFFSET));
+
+	regval = 0xffffffff;
+	while (1) {
+		regval = ioread32((regaddr + SERDES_CMD1_OFFSET));
+		if (!(regval & (1 << SERDES_CMD1_VALID_SHIFT)))
+			break;
+	}
+
+	regval = ioread32((regaddr + SERDES_READDATA1_OFFSET));
+	if (regval & SERDES_READDATA1_HRESP_MASK) {
+		dev_err(priv->dev, "SerDes write Failed... Returning 0\n");
+		return 0;
+	} else
+		return 1;
+}
+
+u32 srio_serdes_read32(struct rio_priv *priv, u32 addr)
+{
+	void __iomem *regaddr;
+	u32 regval = 0;
+	regaddr = (priv->linkdown_reset.win) +
+			APB2SER_SRIO_PHY0_CFG_OFFSET;
+	regval = ((1<<SERDES_CMD1_VALID_SHIFT) |
+			(0x01<<SERDES_CMD1_TSHIFT_SHIFT) |
+			(0x2<<SERDES_CMD1_HSZIE_SHIFT) |
+			(0x2 << SERDES_CMD1_HTRANS_SHIFT) |
+			(addr & SERDES_CMD1_HADDR_MASK));
+
+	iowrite32(regval, (regaddr + SERDES_CMD1_OFFSET));
+	regval = 0xffffffff;
+	while (1) {
+		regval = ioread32((regaddr + SERDES_CMD1_OFFSET));
+		if ((regval & (1 << SERDES_CMD1_VALID_SHIFT)) == 0x0)
+			break;
+	}
+	regval = ioread32((regaddr + SERDES_READDATA1_OFFSET));
+	if (regval & SERDES_READDATA1_HRESP_MASK) {
+		dev_err(priv->dev, "SerDes Read Failed... Returning 0\n");
+		return 0;
+	}
+	regval = ioread32((regaddr + SERDES_READDATA0_OFFSET));
+	return regval;
+}
+
+/**************************sRIO SERDES Ends ***************************/
+static void  ib_dme_irq_handler(struct rio_irq_handler *h/*, u32 state*/);
+/****************************************************************************
+**
+** Implementation Note:
+**
+** The Message DME registers lie within the fixed page block in the RAB SRIO
+** Configuration memory.  Thus, all or almost all of its register accesses
+** do not require use of the RAB memory paging register.  On the other hand,
+** the Message descriptor registers for the ACP34xx platform do lie outside
+** of the fixed page block.  For safety, we will direct all of the accesses
+** to the Message descriptor registers (on the ACP34xx platform and the like),
+** through the RIO mport's lcread and lcwrite interfaces which use a software
+** spin lock to control access.
+**
+*****************************************************************************/
+
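+/*
+ * __dme_dw_dbg - decode the error bits of a completed descriptor.
+ *
+ * Logs (and, with CONFIG_AXXIA_RIO_STAT, counts) the RIO, AXI and
+ * timeout errors flagged in descriptor word 0; @iout selects the
+ * wording: outbound (1, report destination ID) vs inbound (0, report
+ * source ID).
+ */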
+static inline void __dme_dw_dbg(struct device *dev, struct rio_msg_dme *dme,
+			u32 iout, u32 dw0, u32 dw1)
+{
+	int did, mb, let;
+	char *io;
+	char *id;
+	if (dw0 & DME_DESC_DW0_ERROR_MASK) {
+		did = DME_DESC_DW0_GET_DST_ID(dw0);
+		let = DME_DESC_DW1_GET_LETTER(dw1);
+		mb = DME_DESC_DW1_GET_MBOX(dw1);
+		if (iout) {
+			io = "OB";
+			id = "DID";
+		} else {
+			io = "IB";
+			id = "SID";
+		}
+#if defined(CONFIG_AXXIA_RIO_STAT)
+		dme->desc_error_count++;
+#endif
+		if (dw0 & DME_DESC_DW0_RIO_ERR) {
+			dev_err(dev,
+			"%s RIO ERR: %s = %x,Type:11,mbox=%d,letter=%d\n",
+			 io, id, did, mb, let);
+#if defined(CONFIG_AXXIA_RIO_STAT)
+			dme->desc_rio_err_count++;
+#endif
+		}
+		if (dw0 & DME_DESC_DW0_AXI_ERR) {
+			dev_err(dev,
+			"%s AXI ERR: %s = %x,Type:11,mbox=%d,letter=%d\n",
+			 io, id, did, mb, let);
+#if defined(CONFIG_AXXIA_RIO_STAT)
+			dme->desc_axi_err_count++;
+#endif
+		}
+		if (dw0 & DME_DESC_DW0_TIMEOUT_ERR) {
+			dev_err(dev,
+			"%s TIMEOUT ERR: %s = %x,Type:11,mbox=%d,letter=%d\n",
+			 io, id, did, mb, let);
+#if defined(CONFIG_AXXIA_RIO_STAT)
+			dme->desc_tmo_err_count++;
+#endif
+		}
+	}
+#if defined(CONFIG_AXXIA_RIO_STAT)
+	dme->desc_done_count++;
+#endif
+}
+
+#if defined(CONFIG_AXXIA_RIO_STAT)
+static void reset_state_counters(struct rio_priv *priv)
+{
+	priv->rpio_compl_count = 0;
+	priv->rpio_failed_count = 0;
+	priv->apio_compl_count = 0;
+	priv->apio_failed_count = 0;
+	priv->rio_pw_count = 0;
+	priv->rio_pw_msg_count = 0;
+}
+#endif /* defined(CONFIG_AXXIA_RIO_STAT) */
+
+/**
+ * thrd_irq_handler - Threaded interrupt handler
+ * @irq: Linux interrupt number
+ * @data: Pointer to interrupt-specific data
+ *
+ */
+static irqreturn_t thrd_irq_handler(int irq, void *data)
+{
+	struct rio_irq_handler *h = data;
+	struct rio_priv *priv = h->data;
+
+	/**
+	 * Invoke handler callback
+	 */
+	h->thrd_irq_fn(h);
+	axxia_local_config_write(priv, h->irq_enab_reg_addr, h->irq_state_mask);
+	return IRQ_HANDLED;
+}
+
+/**
+ * hw_irq_handler - RIO HW interrupt handler
+ * @irq: Linux interrupt number
+ * @data: Pointer to interrupt-specific data
+ *
+ */
+static irqreturn_t hw_irq_handler(int irq, void *data)
+{
+	struct rio_irq_handler *h = data;
+	struct rio_priv *priv = h->data;
+	u32 state;
+	/**
+	 * Get current interrupt state and clear latched state
+	 * for interrupts handled by current thread.
+	 */
+	axxia_local_config_read(priv, h->irq_state_reg_addr, &state);
+	state &= h->irq_state_mask;
+
+	if (state) {
+		axxia_local_config_write(priv, h->irq_enab_reg_addr, 0x0);
+		return IRQ_WAKE_THREAD;
+	}
+	return IRQ_NONE;
+}
+
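+/*
+ * hw_irq_dme_handler - hard-IRQ entry used when the port runs in
+ * interrupt mode; the inbound DMEs are serviced directly here (no
+ * threaded handler is registered for this line).
+ */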
+static irqreturn_t hw_irq_dme_handler(int irq, void *data)
+{
+	struct rio_irq_handler *h;
+	struct rio_priv *priv = data;
+
+	h = &priv->ib_dme_irq;
+	ib_dme_irq_handler(h);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * Caller must hold RAB lock
+ */
+int alloc_irq_handler(struct rio_irq_handler *h,
+		     void *data,
+		     const char *name)
+{
+	struct rio_priv *priv = data;/*mport->priv;*/
+	u32 mask;
+	int rc;
+
+	if (test_and_set_bit(RIO_IRQ_ENABLED, &h->state))
+		return -EBUSY;
+
+	h->data = data;
+	rc = request_threaded_irq(priv->irq_line,
+				  hw_irq_handler,
+				  thrd_irq_handler,
+				  IRQF_TRIGGER_NONE | IRQF_SHARED,
+				  name,
+				  (void *)h);
+	if (rc) {
+		clear_bit(RIO_IRQ_ENABLED,  &h->state);
+		h->data = NULL;
+		return rc;
+	}
+	if (h->irq_enab_reg_addr) {
+		axxia_local_config_read(priv, h->irq_enab_reg_addr, &mask);
+		mask |= h->irq_state_mask;
+		axxia_local_config_write(priv, h->irq_state_reg_addr, mask);
+		axxia_local_config_write(priv, h->irq_enab_reg_addr, mask);
+	}
+
+	return rc;
+}
+
+/**
+ * Caller must hold RAB lock
+ */
+
+void release_irq_handler(struct rio_irq_handler *h)
+{
+	struct rio_priv *priv = h->data;
+	u32 mask;
+
+	if (test_and_clear_bit(RIO_IRQ_ENABLED, &h->state)) {
+		axxia_local_config_read(priv, h->irq_enab_reg_addr, &mask);
+		mask &= ~h->irq_state_mask;
+		axxia_local_config_write(priv, h->irq_enab_reg_addr, mask);
+		free_irq(priv->irq_line, h);
+		if (h->release_fn)
+			h->release_fn(h);
+	}
+}
+
+/**
+ * MISC Indications
+ */
+#if defined(CONFIG_RAPIDIO_HOTPLUG)
+static void rio_port_down_notify(struct rio_mport *mport)
+{
+	unsigned long flags;
+	struct rio_priv *priv = mport->priv;
+
+	spin_lock_irqsave(&priv->port_lock, flags);
+	if (priv->port_notify_cb)
+		priv->port_notify_cb(mport);
+
+	spin_unlock_irqrestore(&priv->port_lock, flags);
+}
+#else
+#define rio_port_down_notify(mport)
+#endif
+
+/**
+ * __misc_fatal - Check port error state and clear latched
+ *                error state to enable detection of new events;
+ *                notify the platform if the port is broken.
+ *
+ * @mport: Master port
+ * @misc_state: Latched miscellaneous interrupt state
+ */
+static inline void __misc_fatal(struct rio_mport *mport,
+				u32 misc_state)
+{
+	struct rio_priv *priv = mport->priv;
+	u32 amast = 0;
+	u32 aslv_state = 0;
+	u32 aslv_addr = 0;
+	u32 escsr, iecsr;
+
+	dev_err(priv->dev, "*************Fatal Error************\n");
+	axxia_local_config_read(priv, RIO_ESCSR(priv->port_ndx), &escsr);
+	axxia_local_config_read(priv, EPC_IECSR(priv->port_ndx), &iecsr);
+
+	/* clear latched state indications */
+	/* Adding I2E to preserve idle sequence select bit which is R/w */
+	axxia_local_config_write(priv, RIO_ESCSR(priv->port_ndx),
+				(escsr & (RIO_ESCSR_I2E | RIO_EXCSR_WOLR)));
+	dev_err(priv->dev, "port %d ESCSR(0x158) 0x%08x\n", priv->ndx, escsr);
+	if (iecsr & EPC_IECSR_RETE) {
+		dev_err(priv->dev, "Retry Error Threshold Exceeded\n");
+		axxia_local_config_write(priv, EPC_IECSR(priv->port_ndx),
+				(iecsr & EPC_IECSR_RETE));
+	}
+	if (misc_state & AMST_INT) {
+		axxia_local_config_read(priv, RAB_AMAST_STAT, &amast);
+		if (amast & RAB_AMAST_STAT_WRTO)
+			dev_err(priv->dev, "AMST Write Response Timeout Error\n");
+		if (amast & RAB_AMAST_STAT_RDTO)
+			dev_err(priv->dev, "AMST Read Response Timeout Error\n");
+		if (amast & RAB_AMAST_STAT_WRDE)
+			dev_err(priv->dev, "AMST Write Decode Error\n");
+		if (amast & RAB_AMAST_STAT_WRSE)
+			dev_err(priv->dev, "AMST Write Slave Error\n");
+		if (amast & RAB_AMAST_STAT_RDDE)
+			dev_err(priv->dev, "AMST Read Decode Error\n");
+		if (amast & RAB_AMAST_STAT_RDSE)
+			dev_err(priv->dev, "AMST Read Slave Error\n");
+		/* clear latched state */
+		axxia_local_config_write(priv, RAB_AMAST_STAT, amast);
+	}
+	if (misc_state & ASLV_INT) {
+		axxia_local_config_read(priv, RAB_ASLV_STAT_CMD,  &aslv_state);
+		axxia_local_config_read(priv, RAB_ASLV_STAT_ADDR, &aslv_addr);
+		if (aslv_state & RAB_ASLV_STAT_CMD_USUP) {
+			dev_err(priv->dev, "AMBA Slave Unsupported Command\n");
+			axxia_local_config_write(priv, RAB_ASLV_STAT_CMD,
+					 (aslv_state & RAB_ASLV_STAT_CMD_USUP));
+		}
+	}
+	if ((escsr & ESCSR_FATAL) ||
+	    (iecsr & EPC_IECSR_RETE) ||
+	    (misc_state & MISC_FATAL))
+		rio_port_down_notify(mport);
+}
+
+/**
+ * srio_sw_reset - Reset the SRIO (GRIO) module when it reaches a fatal
+ *                 lockup state or if it received a reset control symbol
+ */
+static void srio_sw_reset(struct rio_priv *priv)
+{
+	u32 r32;
+	u32 sval;
+	r32 = srio_serdes_read32(priv, SRIO_PHY_CONTROL0_OFFSET);
+	srio_serdes_write32(priv, SRIO_PHY_CONTROL0_OFFSET,
+			(r32 | priv->linkdown_reset.reg_mask));
+	while (1) {
+		sval = srio_serdes_read32(priv, SRIO_PHY_CONTROL0_OFFSET);
+		if ((sval & priv->linkdown_reset.reg_mask))
+			break;
+	}
+	srio_serdes_write32(priv, SRIO_PHY_CONTROL0_OFFSET, (r32));
+	sval = 0;
+	while (1) {
+		sval = srio_serdes_read32(priv, SRIO_PHY_CONTROL0_OFFSET);
+		if (sval == r32)
+			break;
+	}
+}
+
+/**
+ * PORT WRITE events
+ */
+/**
+ * pw_irq_handler - AXXIA port write interrupt handler
+ * @h: handler specific data
+ * @state: PW Interrupt state
+ *
+ * Handles port write interrupts.
+ */
+static void pw_irq_handler(struct rio_irq_handler *h, u32 state)
+{
+	struct rio_priv *priv = h->data;
+	struct rio_pw_irq *pw = priv->pw_data;
+	u32 csr;
+	int noofpw;
+	u32 msg_word;
+
+	if (pw == NULL) {
+		dev_dbg(priv->dev, "Spurious port write message\n");
+		return;
+	}
+
+	axxia_local_config_read(priv, RAB_IB_PW_CSR, &csr);
+	noofpw = RAB_IB_PW_NUMWORDS(csr);
+	dev_dbg(priv->dev, "%s: noofpw %d\n", __func__, noofpw);
+	if (!(noofpw)) {
+		dev_dbg(priv->dev, "PW Spurious Port Write\n");
+		return;
+	}
+#if defined(CONFIG_AXXIA_RIO_STAT)
+	priv->rio_pw_count++;
+#endif
+	while (noofpw) {
+
+read_buff:
+		axxia_local_config_read(priv, RAB_IB_PW_DATA, &msg_word);
+		pw->msg_buffer[pw->msg_wc++] = BSWAP(msg_word);
+		if (pw->msg_wc == 4) {
+#if defined(CONFIG_AXXIA_RIO_STAT)
+			priv->rio_pw_msg_count++;
+#endif
+			/*
+			 * Pass the port-write message to RIO
+			 * core for processing
+			 */
+			rio_inb_pwrite_handler(
+					 (union rio_pw_msg *)pw->msg_buffer);
+			pw->msg_wc = 0;
+		}
+		noofpw--;
+		if (noofpw)
+			goto read_buff;
+
+		axxia_local_config_read(priv, RAB_IB_PW_CSR, &csr);
+		noofpw = RAB_IB_PW_NUMWORDS(csr);
+	}
+}
+
+static void axxia_rio_flush_pw(struct rio_mport *mport, int noofpw,
+			     struct rio_pw_irq *pw_data)
+{
+	struct rio_priv *priv = mport->priv;
+	u32 dummy;
+	int x;
+
+	dev_dbg(priv->dev, "(%s): flush %d words from pwbuff\n",
+		__func__, noofpw);
+	for (x = 0; x < noofpw; x++) {
+		axxia_local_config_read(priv, RAB_IB_PW_DATA, &dummy);
+		pw_data->discard_count++;
+	}
+	pw_data->msg_wc = 0;
+}
+
+/**
+ * enable_pw - enable port-write interface unit
+ * @mport: Master port implementing the port-write unit
+ *
+ * Caller must hold RAB lock
+ */
+static int enable_pw(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	struct rio_pw_irq *pw_data;
+	u32 rval;
+	int rc = 0;
+
+	if (priv->pw_data)
+		return -EBUSY;
+
+	pw_data = kzalloc(sizeof(struct rio_pw_irq), GFP_KERNEL);
+	if (!pw_data)
+		return -ENOMEM;
+
+	axxia_local_config_read(priv, RAB_IB_PW_CSR, &rval);
+	rval |= RAB_IB_PW_EN;
+	axxia_rio_flush_pw(mport, RAB_IB_PW_NUMWORDS(rval), pw_data);
+	axxia_local_config_write(priv, RAB_IB_PW_CSR, rval);
+	priv->pw_data = pw_data;
+	return rc;
+}
+
+/**
+ * disable_pw - Disable port-write interface unit
+ * @mport: pointer to struct rio_mport
+ *
+ * Caller must hold RAB lock
+ */
+static void disable_pw(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	struct rio_pw_irq *pw_data = priv->pw_data;
+	u32 rval;
+	if (pw_data == NULL)
+		return;
+
+	axxia_local_config_read(priv, RAB_IB_PW_CSR, &rval);
+	rval &= ~RAB_IB_PW_EN;
+	axxia_local_config_write(priv, RAB_IB_PW_CSR, rval);
+	kfree(pw_data);
+	priv->pw_data = NULL;
+}
+
+
+/**
+ * misc_irq_handler - MISC interrupt handler
+ * @h: handler specific data
+ *
+ * Handles the error, doorbell, and link reset request interrupts.
+ */
+static void misc_irq_handler(struct rio_irq_handler *h)
+{
+	struct rio_priv *priv = h->data;
+	struct rio_mport *mport = priv->mport;
+	u32 misc_state;
+
+	axxia_local_config_read(priv, RAB_INTR_STAT_MISC, &misc_state);
+	/*
+	 * Handle miscellaneous 'Link (IPG) Reset Request'
+	 */
+	if (misc_state & LINK_REQ_INT)
+		srio_sw_reset(priv);
+
+	if (misc_state & PORT_WRITE_INT)
+		pw_irq_handler(h, misc_state & PORT_WRITE_INT);
+
+	if (misc_state & (IB_DB_RCV_INT | OB_DB_DONE_INT))
+		db_irq_handler(h,
+			misc_state & (IB_DB_RCV_INT | OB_DB_DONE_INT));
+	/**
+	 * Notify platform if port is broken
+	 */
+	if (misc_state & MISC_FATAL)
+		__misc_fatal(mport, misc_state);
+
+	if (misc_state & GRIO_INT)
+		dev_err(priv->dev, "GRIO Error Interrupt\n");
+		/* TODO Need further Handling */
+	if (misc_state & LL_TL_INT)
+		dev_err(priv->dev, "Logical Layer Error Interrupt\n");
+		/* TODO Need further Handling */
+	if (misc_state & UNSP_RIO_REQ_INT)
+		dev_dbg(priv->dev, "Unsupported RIO Request Received\n");
+		/* TODO Need further Handling */
+	if (misc_state & UNEXP_MSG_INT)
+		dev_dbg_ratelimited(priv->dev,
+			"Unexpected Inbound Data Message Received\n");
+		/* TODO Need further Handling */
+
+	axxia_local_config_write(priv, RAB_INTR_STAT_MISC, misc_state);
+}
+
+static void misc_release_handler(struct rio_irq_handler *h)
+{
+	struct rio_priv *priv = h->data;
+	struct rio_mport *mport = priv->mport;
+	disable_pw(mport);
+}
+/**
+ * linkdown_irq_handler - Link-down interrupt status handler
+ * @h: handler specific data
+ */
+static void linkdown_irq_handler(struct rio_irq_handler *h/*, u32 state*/)
+{
+#if 0
+	struct rio_mport *mport = h->mport;
+
+	/**
+	 * Reset platform if port is broken
+	 */
+	if (state & RAB_SRDS_STAT1_LINKDOWN_INT)
+		srio_sw_reset(mport);
+#endif
+}
+
+/**
+ * rpio_irq_handler - RPIO interrupt handler.
+ * Service Peripheral Bus bridge, RapidIO -> Peripheral bus interrupt
+ *
+ * @h: handler specific data
+ */
+static void rpio_irq_handler(struct rio_irq_handler *h)
+{
+	struct rio_priv *priv = h->data;
+	u32 rstate;
+	axxia_local_config_read(priv, RAB_INTR_STAT_RPIO, &rstate);
+#if defined(CONFIG_AXXIA_RIO_STAT)
+	if (rstate & RPIO_TRANS_COMPLETE)
+		priv->rpio_compl_count++;
+#endif
+	if (rstate &  RPIO_TRANS_FAILED) {
+		u32 rpio_stat;
+
+		axxia_local_config_read(priv, RAB_RPIO_STAT, &rpio_stat);
+		if (rpio_stat & RAB_RPIO_STAT_RSP_ERR)
+			dev_dbg(priv->dev, "RPIO AXI Response Error\n");
+		if (rpio_stat & RAB_RPIO_STAT_ADDR_MAP)
+			dev_dbg(priv->dev, "RPIO Invalid Address Mapping Error\n");
+		if (rpio_stat & RAB_RPIO_STAT_DISABLED)
+			dev_dbg(priv->dev, "RPIO Engine Not Enabled\n");
+
+		axxia_local_config_write(priv, RAB_RPIO_STAT, rpio_stat);
+#if defined(CONFIG_AXXIA_RIO_STAT)
+		priv->rpio_failed_count++;
+#endif
+	}
+	axxia_local_config_write(priv, RAB_INTR_STAT_RPIO, rstate);
+}
+
+/**
+ * APIO
+ */
+
+/**
+ * apio_irq_handler - APIO interrupt handler.
+ * Service Peripheral Bus bridge, Peripheral bus -> RapidIO interrupt
+ *
+ * @h: handler specific data
+ */
+static void apio_irq_handler(struct rio_irq_handler *h)
+{
+	struct rio_priv *priv = h->data;
+	u32 astate;
+	axxia_local_config_read(priv, RAB_INTR_STAT_APIO, &astate);
+#if defined(CONFIG_AXXIA_RIO_STAT)
+	if (astate & APIO_TRANS_COMPLETE)
+		priv->apio_compl_count++;
+#endif
+	if (astate & APIO_TRANS_FAILED) {
+		u32 apio_stat;
+
+		axxia_local_config_read(priv, RAB_APIO_STAT, &apio_stat);
+		if (apio_stat & RAB_APIO_STAT_RQ_ERR)
+			dev_dbg(priv->dev, "APIO AXI Request Format Error\n");
+		if (apio_stat & RAB_APIO_STAT_TO_ERR)
+			dev_dbg(priv->dev, "APIO RIO Timeout Error\n");
+		if (apio_stat & RAB_APIO_STAT_RSP_ERR)
+			dev_dbg(priv->dev, "APIO RIO Response Error\n");
+		if (apio_stat & RAB_APIO_STAT_MAP_ERR)
+			dev_dbg(priv->dev, "APIO Invalid Address Mapping Error\n");
+		if (apio_stat & RAB_APIO_STAT_MAINT_DIS)
+			dev_dbg(priv->dev, "APIO Maintenance Mapping Not Enabled\n");
+		if (apio_stat & RAB_APIO_STAT_MEM_DIS)
+			dev_dbg(priv->dev, "APIO Memory Mapping Not Enabled\n");
+		if (apio_stat & RAB_APIO_STAT_DISABLED)
+			dev_dbg(priv->dev, "APIO Engine Not Enabled\n");
+		axxia_local_config_write(priv, RAB_APIO_STAT, apio_stat);
+#if defined(CONFIG_AXXIA_RIO_STAT)
+		priv->apio_failed_count++;
+#endif
+	}
+	axxia_local_config_write(priv, RAB_INTR_STAT_APIO, astate);
+}
+
+/**
+ * DOORBELL events
+ */
+
+/**
+ * rx_db_handler - AXXIA inbound doorbell handler
+ * @mport: Master port with triggered interrupt
+ *
+ * Handles inbound doorbells.  Executes a callback on each received
+ * doorbell.  Called from the misc_irq thread, rio-misc-db.
+ */
+void rx_db_handler(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	struct rio_dbell *dbell;
+	u32 csr, info;
+	u8 num_msg;
+	u16 src_id, db_info;
+	int found;
+
+	axxia_local_config_read(priv, RAB_IB_DB_CSR, &csr);
+	num_msg = IB_DB_CSR_NUM_MSG(csr);
+
+	for (; num_msg; num_msg--) {
+		axxia_local_config_read(priv, RAB_IB_DB_INFO, &info);
+		src_id = DBELL_SID(info);
+		db_info = DBELL_INF(info);
+
+		found = 0;
+		dev_dbg(priv->dev,
+			 "Processing doorbell, sid %4.4x info %4.4x\n",
+			src_id, db_info);
+
+		list_for_each_entry(dbell, &mport->dbells, node) {
+			if (dbell->res->start <= db_info &&
+			    (dbell->res->end >= db_info)) {
+				found = 1;
+				break;
+			}
+		}
+		if (found) {
+			/**
+			 * NOTE: dst is set to 0 since we don't have
+			 *       that value in the ACP
+			 */
+			if (dbell->dinb)
+				dbell->dinb(mport, dbell->dev_id, src_id,
+						0, db_info);
+		} else {
+			dev_dbg(priv->dev,
+				"Spurious doorbell, sid %4.4x info %4.4x\n",
+				src_id, db_info);
+		}
+	}
+}
+
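+/*
+ * db_irq_handler - dispatch doorbell events: inbound doorbells are
+ * passed to rx_db_handler(), and completed outbound doorbells are
+ * scanned for retry/error/timeout status.
+ */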
+void db_irq_handler(struct rio_irq_handler *h, u32 state)
+{
+	struct rio_priv *priv = h->data;
+	struct rio_mport *mport = priv->mport;
+
+	/**
+	 * Handle RX doorbell events
+	 */
+	if (state & IB_DB_RCV_INT)
+		rx_db_handler(mport);
+
+	/**
+	 * Check for outbound doorbell Error conditions.
+	 */
+	if (state & OB_DB_DONE_INT) {
+		int db;
+		u32 csr;
+		for (db = 0; db < MAX_OB_DB; db++) {
+			axxia_local_config_read(priv, RAB_OB_DB_CSR(db), &csr);
+
+			if (OB_DB_STATUS(csr) == OB_DB_STATUS_RETRY)
+				dev_dbg(priv->dev,
+				  "Rio Doorbell Retry received\n");
+			else if (OB_DB_STATUS(csr) == OB_DB_STATUS_ERROR)
+				dev_dbg(priv->dev,
+				  "Rio Doorbell send Error\n");
+			else if (OB_DB_STATUS(csr) == OB_DB_STATUS_TIMEOUT)
+				dev_dbg(priv->dev,
+				  "Rio Doorbell send Timeout\n");
+		}
+	}
+}
+
+/**
+ * OBDME Events/Outbound Messages
+ */
+
+static void release_dme(struct kref *kref)
+{
+	struct rio_msg_dme *me = container_of(kref, struct rio_msg_dme, kref);
+	struct rio_priv *priv = me->priv;
+	struct rio_msg_desc *desc;
+	int i;
+
+	if (me->desc) {
+		for (i = 0, desc = me->desc; i < me->entries; i++, desc++)
+			kfree(desc->msg_virt);
+		kfree(me->desc);
+	}
+
+	kfree(me->descriptors);
+
+	if (priv->intern_msg_desc) {
+		if (me->dres.parent)
+			release_resource(&me->dres);
+	}
+
+	kfree(me);
+}
+
+static inline struct rio_msg_dme *dme_get(struct rio_msg_dme *me)
+{
+	if (me)
+		kref_get(&me->kref);
+	return me;
+}
+
+static inline void dme_put(struct rio_msg_dme *me)
+{
+	if (me)
+		kref_put(&me->kref, release_dme);
+}
+
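+/*
+ * check_dme - test whether DME @dme_no exists and is free.
+ *
+ * The engines are described in two banks (index 0 and 1 of @num_dmes,
+ * @dmes_in_use and @dmes; multi- vs single-segment, see
+ * choose_ob_dme_static()); @dme_no is a flat index across both banks.
+ * Returns 0 if the engine is available, -EBUSY if it is already
+ * allocated, and -ENXIO if it is not implemented.
+ */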
+static inline int check_dme(int dme_no,
+			    int *num_dmes,
+			    int *dmes_in_use,
+			    int *dmes)
+{
+	int i;
+	for (i = 0; i < 2; i++) {
+		if (dme_no < num_dmes[i]) {
+			if (dmes[i] & (1 << dme_no)) {
+				if (dmes_in_use[i] & (1 << dme_no))
+					return -EBUSY;	/* Already allocated */
+				return 0;
+			}
+		} else {
+			dme_no -= num_dmes[i];
+		}
+	}
+
+	return -ENXIO;	/* Not available */
+}
+
+/*
+ * Enforce a DME 'choice' previously made
+ */
+static inline int select_dme(int dme_no,
+			     int *num_dmes,
+			     int *dmes_in_use,
+			     int *dmes,
+			     int value)
+{
+	int i;
+	for (i = 0; i < 2; i++) {
+		if (dme_no < num_dmes[i]) {
+			dmes_in_use[i] &= ~(1 << dme_no);
+			dmes_in_use[i] |= (value << dme_no);
+			return 0;
+		} else {
+			dme_no -= num_dmes[i];
+		}
+	}
+
+	return -ENXIO;	/* Not available */
+}
+
+/* Select the DME for a mailbox
+ * based on its occupancy; the two outbound DMEs
+ * are shared among the mailboxes.
+ */
+static inline int choose_ob_dme_static(
+	struct rio_priv	*priv,
+	int mbox_dest,
+	int buf_sz,
+	struct rio_msg_dme **ob_dme)
+{
+	int  i, ndx, sz, min_entries = 0;
+	int  dme_no = 0, ret_dme_no = -ENXIO;
+	struct rio_msg_dme *ret_dme = NULL;
+	struct rio_tx_dme *dme_s;
+
+	/* Multi-segment vs single-segment DMEs */
+	ndx = RIO_MBOX_TO_IDX(mbox_dest);
+	switch (ndx) {
+	case 0:
+		if ((priv->num_outb_dmes[0] == 0) || (priv->outb_dmes[0] == 0))
+			return -ENXIO;
+		break;
+	case 1:
+		if ((priv->num_outb_dmes[1] == 0) || (priv->outb_dmes[1] == 0))
+			return -ENXIO;
+		dme_no += priv->num_outb_dmes[0];
+		break;
+	default:
+		dev_err(priv->dev, "Attempt to select unknown OB DME type!\n");
+		return -ENXIO;
+	}
+
+	/* Among the eligible DMEs, pick the one with the most free entries */
+	for (i = 0; i < priv->num_outb_dmes[ndx]; i++, dme_no++) {
+		sz = RIO_OUTB_DME_TO_BUF_SIZE(priv, dme_no);
+
+		if (sz > buf_sz)
+			continue;
+
+		dme_s = &priv->ob_dme_shared[dme_no];
+
+		if (dme_s->ring_size_free > min_entries) {
+			min_entries = dme_s->ring_size_free;
+			ret_dme = dme_s->me;
+			ret_dme_no = dme_no;
+		}
+	}
+
+	(*ob_dme) = ret_dme;
+	return ret_dme_no;
+}
+
+static void release_mbox(struct kref *kref)
+{
+	struct rio_rx_mbox *mb = container_of(kref, struct rio_rx_mbox, kref);
+	struct rio_priv *priv = mb->mport->priv;
+	int letter;
+	u32 dme_no;
+
+	/* Quickly disable the engines */
+	for (letter = 0; letter < RIO_MSG_MAX_LETTER; letter++) {
+		if (mb->me[letter])
+			axxia_local_config_write(priv,
+				   RAB_IB_DME_CTRL(mb->me[letter]->dme_no), 0);
+	}
+
+	/* And then release the remaining resources */
+	for (letter = 0; letter < RIO_MSG_MAX_LETTER; letter++) {
+		if (mb->me[letter]) {
+			dme_no = mb->me[letter]->dme_no;
+			dme_put(mb->me[letter]);
+			select_dme(dme_no,
+					&priv->num_inb_dmes[0],
+					&priv->inb_dmes_in_use[0],
+					&priv->inb_dmes[0], 0);
+			priv->ib_dme[dme_no] = NULL;
+		}
+	}
+
+	for (letter = 0; letter < RIO_MSG_MAX_LETTER; letter++)
+		kfree(mb->virt_buffer[letter]);
+
+	kfree(mb);
+}
+
+static inline struct rio_rx_mbox *mbox_get(struct rio_rx_mbox *mb)
+{
+	if (mb)
+		kref_get(&mb->kref);
+	return mb;
+}
+
+static inline void mbox_put(struct rio_rx_mbox *mb)
+{
+	if (mb)
+		kref_put(&mb->kref, release_mbox);
+}
+
+static int alloc_msg_descriptors(struct rio_mport *mport,
+				  struct resource *dres,
+				  int buf_sz,
+				  int entries,
+				  int need_to_init,
+				  struct rio_msg_desc **desc,
+				  struct rio_desc **descriptors)
+{
+	struct rio_priv *priv = mport->priv;
+	struct rio_msg_desc *rdesc = NULL, *idesc;
+	struct rio_desc *rdescriptors = NULL;
+	int i;
+
+	if (priv->intern_msg_desc) {
+		dres->name = "DME_DESC";
+		dres->flags = ACP_RESOURCE_HW_DESC;
+		if (allocate_resource(&priv->acpres[ACP_HW_DESC_RESOURCE],
+				dres, entries,
+				priv->acpres[ACP_HW_DESC_RESOURCE].start,
+				priv->acpres[ACP_HW_DESC_RESOURCE].end,
+				0x1, NULL, NULL)) {
+			memset(dres, 0, sizeof(*dres));
+			goto err;
+		}
+	} else {
+		dres->start = 0;
+	}
+
+	rdesc = kzalloc(sizeof(struct rio_msg_desc) * entries, GFP_ATOMIC);
+	if (rdesc == NULL)
+		goto err;
+	rdescriptors = kzalloc(sizeof(struct rio_desc) * entries, GFP_ATOMIC);
+	if (rdescriptors == NULL)
+		goto err;
+
+	for (i = 0, idesc = rdesc; i < need_to_init; i++, idesc++) {
+		idesc->msg_virt = kzalloc(buf_sz, GFP_KERNEL);
+		if (!idesc->msg_virt)
+			goto err;
+		idesc->msg_phys = virt_to_phys(idesc->msg_virt);
+	}
+
+	idesc--;
+	idesc->last = DME_DESC_DW0_NXT_DESC_VALID;
+
+	(*desc) = rdesc;
+	(*descriptors) = rdescriptors;
+
+	return 0;
+
+err:
+	kfree(rdesc);
+	kfree(rdescriptors);
+	return -ENOMEM;
+}
+
+static struct rio_msg_dme *alloc_message_engine(struct rio_mport *mport,
+						int dme_no, void *dev_id,
+						int buf_sz, int entries)
+{
+	struct rio_priv *priv = mport->priv;
+	struct rio_msg_dme *me = kzalloc(sizeof(struct rio_msg_dme),
+					 GFP_KERNEL);
+	int rc = 0;
+
+	if (!me)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&me->kref);
+	spin_lock_init(&me->lock);
+	me->priv = priv;
+	me->sz = 0;/*buf_sz;*/
+
+	rc = alloc_msg_descriptors(mport, &me->dres, buf_sz, entries,
+				entries, &me->desc, &me->descriptors);
+	if (rc < 0)
+		goto err;
+
+	me->entries = entries;
+	me->dev_id = dev_id;
+	me->write_idx = 0;
+	me->read_idx = 0;
+	me->tx_dme_tmo = 0;
+	me->dme_no = dme_no;
+
+	return me;
+
+err:
+	dme_put(me);
+	return ERR_PTR(rc);
+}
+
+/**
+ * ob_dme_msg_handler - Outbound Data message handler
+ * --- Called from OB DME irq handler thread ---
+ * @h: Pointer to interrupt-specific data
+ *
+ * Handles outbound message interrupts. Executes a callback,
+ * if available.
+ *
+ * @note:
+ * HW descriptor fetch and update may be out of order.
+ * Check state of all used descriptors and take care to not fall into
+ * any of the traps that come with this design:
+ *
+ * Due to this (possibly) out of order execution in the HW, SW ack of
+ * descriptors must be done atomically, re-enabling descriptors with
+ * completed transactions while processing finished transactions may
+ * break the ring and leave the DMA engine in a state where it doesn't
+ * process new inserted requests.
+ */
+static void  ob_dme_msg_handler(struct rio_irq_handler *h, u32 dme_no)
+{
+	struct rio_priv *priv = h->data;
+	struct rio_mport *mport = priv->mport;
+	struct rio_msg_dme *dme = priv->ob_dme_shared[dme_no].me;
+	u32 dw0;
+	u32 dw1;
+	int mbox;
+	struct rio_tx_mbox *mb;
+	unsigned int iteration = 0;
+
+	/**
+	 * Process all completed transactions
+	 */
+ob_dme_restart:
+	while (dme->read_idx != dme->write_idx) {
+		AXXIA_RIO_SYSMEM_BARRIER();
+		dw0 = *((u32 *)DESC_TABLE_W0_MEM(dme, dme->read_idx));
+		if ((dw0 & DME_DESC_DW0_VALID) &&
+			(dw0 & DME_DESC_DW0_READY_MASK)) {
+			*((u32 *)DESC_TABLE_W0_MEM(dme, dme->read_idx))
+					= dw0 & DME_DESC_DW0_NXT_DESC_VALID;
+			dw1 = *((u32 *)DESC_TABLE_W1_MEM(dme,
+						dme->read_idx));
+			__dme_dw_dbg(priv->dev, dme, 1, dw0, dw1);
+			dme->read_idx = (dme->read_idx + 1) &
+						(dme->entries - 1);
+			mbox = (dw1 >> 2) & 0x3;
+			mb = priv->ob_mbox[mbox];
+			if (mb) {
+				if (mport->outb_msg[mbox].mcback) {
+					mb->tx_slot = (mb->tx_slot + 1)
+							%(mb->ring_size);
+					mport->outb_msg[mbox].mcback(mport,
+							mb->dev_id,
+							mbox, mb->tx_slot);
+				}
+#ifdef CONFIG_AXXIA_RIO_STAT
+				mb->compl_msg_count++;
+#endif
+			}
+			iteration++;
+		} else
+			break;
+	}
+	if (iteration) {
+		iteration = 0;
+		goto ob_dme_restart;
+	}
+}
+
+/**
+ * ob_dme_tmr_handler - Outbound message handler
+ * --- Called as an hrtimer callback ---
+ * @hr: timer embedded in the rio_tx_dme being polled
+ *
+ * Handles outbound message completions. Calls the
+ * msg handler if descriptor xfer complete is set,
+ * or reports the error.
+ */
+enum hrtimer_restart ob_dme_tmr_handler(struct hrtimer *hr)
+{
+	struct rio_tx_dme *obd = container_of(hr, struct rio_tx_dme, tmr);
+	struct rio_msg_dme *me = obd->me;
+	struct rio_priv *priv = me->priv;
+	struct rio_irq_handler *h = &priv->ob_dme_irq;
+	u32 dme_stat, dme_no;
+
+	dme_no = me->dme_no;
+	axxia_local_config_read(priv, RAB_OB_DME_STAT(dme_no),
+						&dme_stat);
+
+	if (dme_stat & (OB_DME_STAT_DESC_FETCH_ERR |
+				OB_DME_STAT_DESC_ERR |
+				OB_DME_STAT_DESC_UPD_ERR))
+		dev_err(priv->dev, "OB DME%d: Descriptor Error\n",
+								dme_no);
+	else {
+
+		if (dme_stat & (OB_DME_STAT_DATA_TRANS_ERR |
+				OB_DME_STAT_RESP_ERR |
+				OB_DME_STAT_RESP_TO)) {
+			if (dme_stat & OB_DME_STAT_DATA_TRANS_ERR)
+				dev_err(priv->dev, "OB DME%d: Transaction Error\n",
+								dme_no);
+			if (dme_stat & OB_DME_STAT_RESP_ERR)
+				dev_dbg_ratelimited(priv->dev,
+						"OB DME%d: Response Error\n",
+								dme_no);
+			if (dme_stat & OB_DME_STAT_RESP_TO)
+				dev_err(priv->dev, "OB DME%d: Response Timeout Error\n",
+								dme_no);
+		}
+		ob_dme_msg_handler(h, dme_no);
+	}
+	axxia_local_config_write(priv, RAB_OB_DME_STAT(dme_no),
+							dme_stat);
+	hrtimer_forward_now(&obd->tmr, ktime_set(0, axxia_hrtimer_delay));
+	return HRTIMER_RESTART;
+}
+
+static int alloc_ob_dme_shared(struct rio_priv *priv,
+			struct rio_tx_dme *dme_s, int dme_no)
+{
+	int rc = 0;
+	int sz;
+	struct rio_mport *mport = priv->mport;
+	struct rio_msg_dme *me = NULL;
+	struct rio_msg_desc *desc = NULL;
+	u32 dw0, dw1, dw2, dw3;
+	u64  desc_chn_start = 0;
+	int entries = CONFIG_OB_DME_ENTRY_SIZE;
+	int i;
+
+	sz = RIO_OUTB_DME_TO_BUF_SIZE(priv, dme_no);
+	entries = roundup_pow_of_two(entries);
+	pr_info("Configuring DME %d with %d entries\n", dme_no, entries);
+	me = alloc_message_engine(mport,
+				dme_no, NULL, sz, entries);
+	if (IS_ERR(me)) {
+		rc = PTR_ERR(me);
+		goto err;
+	}
+
+	for (i = 0, desc = me->desc; i < entries; i++, desc++) {
+		dw0 = 0;
+		if (!priv->intern_msg_desc) {
+			dw1 = 0;
+			dw2 = (u32)(desc->msg_phys >>  8) & 0x3fffffff;
+			*((u32 *)DESC_TABLE_W0_MEM(me, i)) = dw0;
+			*((u32 *)DESC_TABLE_W1_MEM(me, i)) = dw1;
+			*((u32 *)DESC_TABLE_W2_MEM(me, i)) = dw2;
+			*((u32 *)DESC_TABLE_W3_MEM(me, i)) = 0;
+		} else {
+			dw1 = 0;
+			dw2 = (u32)(desc->msg_phys >> 8) & 0x3fffffff;
+			__rio_local_write_config_32(mport,
+				    DESC_TABLE_W0(me->dres.start+i), dw0);
+			__rio_local_write_config_32(mport,
+				    DESC_TABLE_W1(me->dres.start+i), dw1);
+			__rio_local_write_config_32(mport,
+				    DESC_TABLE_W2(me->dres.start+i), dw2);
+			__rio_local_write_config_32(mport,
+				    DESC_TABLE_W3(me->dres.start+i), 0);
+		}
+	}
+
+	/*
+	 * Last descriptor - make ring.
+	 * Next desc table entry -> dw2.First desc address[37:36]
+	 *                       -> dw3.First desc address[35:4].
+	 * (desc_base + 0x10 * nr)
+	 */
+	desc--; i--;
+	dw0 |= DME_DESC_DW0_NXT_DESC_VALID;
+	if (!priv->intern_msg_desc) {
+		desc_chn_start =
+			(uintptr_t)virt_to_phys(me->descriptors);
+
+		dw2  = *((u32 *)DESC_TABLE_W2_MEM(me, i));
+		dw2 |= (desc_chn_start >> 4) & 0xc0000000;
+		dw3  = desc_chn_start >> 4;
+		*((u32 *)DESC_TABLE_W0_MEM(me, i)) = dw0;
+		*((u32 *)DESC_TABLE_W2_MEM(me, i)) = dw2;
+		*((u32 *)DESC_TABLE_W3_MEM(me, i)) = dw3;
+	} else {
+		desc_chn_start = DESC_TABLE_W0(me->dres.start);
+		__rio_local_read_config_32(mport,
+				DESC_TABLE_W2(me->dres.start+i), &dw2);
+		dw2 |= ((desc_chn_start >> 8) & 0xc0000000);
+		dw3  = 0;
+		__rio_local_write_config_32(mport,
+				DESC_TABLE_W0(me->dres.start+i), dw0);
+		__rio_local_write_config_32(mport,
+				DESC_TABLE_W2(me->dres.start+i), dw2);
+		__rio_local_write_config_32(mport,
+				DESC_TABLE_W3(me->dres.start+i), dw3);
+	}
+	test_and_set_bit(RIO_DME_OPEN, &me->state);
+	dme_s->me = me;
+	dme_s->ring_size = 0x0;
+	dme_s->ring_size_free = entries;
+err:
+	return rc;
+}
+/**
+ * open_outb_mbox_static - Initialize AXXIA outbound mailbox
+ *			   using statically allocated DME descriptors.
+ *
+ * @mport: Master port implementing the outbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox_id: Mailbox to open
+ * @entries: Number of entries in the outbound mailbox ring for each letter
+ * @prio: 0..3, higher number -> lower priority.
+ *
+ * Caller must hold RAB lock
+ * If the specified mbox DME has already been opened/reserved, then we just
+ * abort out of this operation with "busy", and without changing resource
+ * allocation for the mbox DME.
+ *
+ * To increase efficiency, the descriptors are allocated and initialized
+ * at initialization time and then kept forever to be reused.
+ *
+ * Returns:
+ * %0 if successful
+ * %-EINVAL if an argument is invalid
+ * %-ENOMEM if unable to allocate sufficient memory
+ * %-ENODEV if unable to find a DME matching the input arguments
+ */
+
+static int open_outb_mbox_static(struct rio_mport *mport,
+			void *dev_id, int mbox_id, int entries, int prio)
+{
+	int  rc = 0;
+	int  dme_no, buf_sz = 0;
+	struct rio_priv *priv = mport->priv;
+	struct rio_tx_mbox *mb;/* = priv->ob_mbox[mbox_id];*/
+	struct rio_msg_dme *me = NULL;
+	unsigned long iflags0;
+	u32 dme_ctrl, dme_stat, desc_addr, wait = 0;
+	u64  desc_chn_start = 0;
+
+	if ((mbox_id < 0) || (mbox_id > RIO_MAX_TX_MBOX) ||
+	    (entries < 2) || (entries > priv->desc_max_entries))
+		return -EINVAL;
+	if (priv->ob_mbox[mbox_id])
+		return -EINVAL;
+	mb = kzalloc(sizeof(struct rio_tx_mbox), GFP_KERNEL);
+	if (!mb)
+		return -ENOMEM;
+	spin_lock_init(&mb->lock);
+	mb->dme_no = 0xff;
+#ifdef CONFIG_AXXIA_RIO_STAT
+	mb->sent_msg_count = 0;
+	mb->compl_msg_count = 0;
+#endif
+	spin_lock_irqsave(&mb->lock, iflags0);
+
+	if (test_bit(RIO_MB_OPEN, &mb->state)) {
+		spin_unlock_irqrestore(&mb->lock, iflags0);
+		return -EINVAL;
+	}
+
+	/*
+	 * Pick the OB DME that we will use for this mailbox
+	 */
+	buf_sz = RIO_MBOX_TO_BUF_SIZE(mbox_id);
+
+	dme_no = choose_ob_dme_static(priv, mbox_id, buf_sz, &me);
+	if (dme_no < 0) {
+		spin_unlock_irqrestore(&mb->lock, iflags0);
+		rc = dme_no;
+		goto err;
+	}
+	if (IS_ERR_OR_NULL(me)) {
+		spin_unlock_irqrestore(&mb->lock, iflags0);
+		rc = PTR_ERR(me);
+		goto err;
+	}
+
+	if (!test_bit(RIO_DME_MAPPED, &me->state)) {
+		do {
+			axxia_local_config_read(priv,
+				RAB_OB_DME_STAT(dme_no), &dme_stat);
+			if (wait++ > 100) {
+				rc = -EBUSY;
+				goto err;
+			}
+		} while (dme_stat & OB_DME_STAT_TRANS_PEND);
+		desc_chn_start =
+			(uintptr_t)virt_to_phys(me->descriptors);
+
+		dme_ctrl  = (prio & 0x3) << 4;
+		dme_ctrl |= (u32)((desc_chn_start >> 6) & 0xc0000000);
+		desc_addr  = (u32)desc_chn_start >> 4;
+		axxia_local_config_write(priv,
+			RAB_OB_DME_DESC_ADDR(dme_no), desc_addr);
+		axxia_local_config_write(priv, RAB_OB_DME_CTRL(dme_no),
+				dme_ctrl);
+		me->dme_ctrl = dme_ctrl;
+		me->dme_ctrl |= (DME_WAKEUP | DME_ENABLE);
+		priv->ob_dme_irq.irq_state_mask |= (1 << dme_no);
+		axxia_local_config_write(priv, RAB_INTR_STAT_ODME,
+							1<<dme_no);
+		axxia_local_config_write(priv, RAB_INTR_ENAB_ODME,
+				priv->ob_dme_irq.irq_state_mask);
+		test_and_set_bit(RIO_DME_MAPPED, &me->state);
+	}
+
+	mb->mport = mport;
+	mb->mbox_no = mbox_id;
+	mb->dme_no = dme_no;
+	mb->me = me;
+	mb->ring_size = entries;
+	mb->tx_slot = 0;
+	mb->dev_id = dev_id;
+	me->sz++;
+	mdelay(500); /* Delay added to ensure completion of any pending TX
+			before Transmission on this Mailbox */
+
+	if (me->sz == 1) {
+		hrtimer_init(&priv->ob_dme_shared[dme_no].tmr,
+				 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		priv->ob_dme_shared[dme_no].tmr.function = ob_dme_tmr_handler;
+		hrtimer_start(&priv->ob_dme_shared[dme_no].tmr,
+				ktime_set(0, (axxia_hrtimer_delay)),
+					HRTIMER_MODE_REL_PINNED);
+	}
+
+	test_and_set_bit(RIO_MB_MAPPED, &mb->state);
+
+	priv->ob_dme_shared[dme_no].ring_size += entries;
+	priv->ob_dme_shared[dme_no].ring_size_free -= entries;
+
+	spin_unlock_irqrestore(&mb->lock, iflags0);
+
+#ifdef CONFIG_AXXIA_RIO_STAT
+	me->desc_done_count = 0;
+	me->desc_error_count = 0;
+	me->desc_rio_err_count = 0;
+	me->desc_axi_err_count = 0;
+	me->desc_tmo_err_count = 0;
+#endif
+	/**
+	 * Finish updating the mailbox and DME state before we go
+	 */
+	test_and_set_bit(RIO_MB_OPEN, &mb->state);
+	priv->ob_mbox[mbox_id] = mb;
+	return 0;
+
+err:
+	spin_unlock_irqrestore(&mb->lock, iflags0);
+	kfree(mb);
+	return rc;
+}
+
+
+/**
+ * release_outb_dme - Close AXXIA outbound DME engine structures
+ * @mport: Master port implementing the outbound message unit
+ * @mbox: Mailbox to close
+ *
+ * Caller must hold RAB lock
+ * Release all resources i.e. DMEs, descriptors, buffers, and so on.
+ */
+
+static void release_outb_dme(struct rio_irq_handler *h)
+{
+	struct rio_priv *priv = h->data;
+	int i;
+	struct rio_msg_dme *me;
+
+	for (i = 0; i < DME_MAX_OB_ENGINES; i++) {
+		me = priv->ob_dme_shared[i].me;
+		if (me && test_bit(RIO_DME_OPEN, &me->state)) {
+			if (test_bit(RIO_DME_MAPPED, &me->state)) {
+				axxia_local_config_write(priv,
+					RAB_OB_DME_CTRL(me->dme_no), 0);
+
+				select_dme(me->dme_no,
+					&priv->num_outb_dmes[0],
+					&priv->outb_dmes_in_use[0],
+					&priv->outb_dmes[0], 0);
+			}
+
+			dme_put(me);
+		}
+	}
+	h->data = NULL;
+}
+
+/**
+ * ib_dme_irq_handler - AXXIA inbound message interrupt handler
+ * @h: Pointer to interrupt-specific data
+ *
+ * Handles inbound message interrupts.  Executes a callback, if available,
+ * on each received message, and reports errors.
+ */
+static void  ib_dme_irq_handler(struct rio_irq_handler *h/*, u32 state*/)
+{
+	struct rio_priv *priv = h->data;
+	struct rio_mport *mport = priv->mport;
+	int mbox_no;
+	int letter;
+	u32 dme_mask, mask;
+ib_dme_restart:
+	axxia_local_config_read(priv, RAB_INTR_STAT_IDME, &dme_mask);
+	mask = dme_mask;
+	if (!mask)
+		return;
+	axxia_local_config_write(priv, RAB_INTR_STAT_IDME, mask);
+	/**
+	 * Inbound mbox has 4 engines, 1 per letter.
+	 * For each message engine that contributes to IRQ state,
+	 * go through all descriptors in queue that have been
+	 * written but not handled.
+	 */
+	while (dme_mask) {
+		struct rio_msg_dme *me;
+		u32 dme_stat;
+		int dme_no = __fls(dme_mask);
+		dme_mask ^= (1 << dme_no);
+		me = priv->ib_dme[dme_no];
+		/**
+		 * Get and clear latched state
+		 */
+		axxia_local_config_read(priv,
+					   RAB_IB_DME_STAT(dme_no), &dme_stat);
+		axxia_local_config_write(priv,
+					    RAB_IB_DME_STAT(dme_no), dme_stat);
+		if (!me)
+			continue;
+
+		mbox_no = me->mbox;
+		letter = me->letter;
+		if (!(dme_stat & 0xff))
+			continue;
+
+		if (dme_stat & (IB_DME_STAT_DESC_XFER_CPLT |
+				IB_DME_STAT_DESC_CHAIN_XFER_CPLT)) {
+			if (mport->inb_msg[mbox_no].mcback)
+				mport->inb_msg[mbox_no].mcback(mport,
+					me->dev_id, mbox_no, letter);
+		}
+
+		if (dme_stat & IB_DME_STAT_ERROR_MASK) {
+			if (dme_stat & (IB_DME_STAT_DESC_UPDATE_ERR |
+					IB_DME_STAT_DESC_ERR |
+					IB_DME_STAT_DESC_FETCH_ERR))
+				dev_err(priv->dev,
+				"IB Mbox%d Letter%d DME%d: Descriptor Error\n",
+						mbox_no, letter, dme_no);
+
+			if (dme_stat & IB_DME_STAT_DATA_TRANS_ERR)
+				dev_err(priv->dev,
+				"IB Mbox%d Letter%d DME%d: Transaction Error\n",
+						mbox_no, letter, dme_no);
+
+			if (dme_stat & IB_DME_STAT_MSG_ERR)
+				dev_err(priv->dev,
+				"IB MBOX%d Letter%d DME%d: Message Error\n",
+						mbox_no, letter, dme_no);
+
+			if (dme_stat & (IB_DME_STAT_MSG_TIMEOUT))
+				dev_err(priv->dev,
+				"IB MBOX%d Letter%d DME%d: SRIO Timeout\n",
+						mbox_no, letter, dme_no);
+		}
+
+	}
+	goto ib_dme_restart;
+}
+
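+/**
+ * ib_dme_tmr_handler - Inbound message timer handler
+ * --- hrtimer callback used instead of the inbound DME interrupt when
+ * the port runs in AXXIA_IBDME_TIMER_MODE ---
+ * @hr: timer embedded in the rio_rx_mbox being polled
+ *
+ * Performs the same descriptor processing as ib_dme_irq_handler(), but
+ * restricted to the DMEs of one mailbox; once the pending state is
+ * drained it re-arms itself with an axxia_hrtimer_delay nanosecond
+ * period.
+ */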
+enum hrtimer_restart ib_dme_tmr_handler(struct hrtimer *hr)
+{
+	struct rio_rx_mbox *mb = container_of(hr, struct rio_rx_mbox, tmr);
+	struct rio_mport *mport = mb->mport;
+	struct rio_priv *priv = mport->priv;
+	int mbox_no;
+	int letter;
+	u32 dme_mask, mask;
+ib_dme_restart:
+	axxia_local_config_read(priv, RAB_INTR_STAT_IDME, &dme_mask);
+	dme_mask &= mb->irq_state_mask;
+	mask = dme_mask;
+	if (!mask) {
+		hrtimer_forward_now(&mb->tmr,
+				ktime_set(0, axxia_hrtimer_delay));
+		return HRTIMER_RESTART;
+	}
+	axxia_local_config_write(priv, RAB_INTR_STAT_IDME, mask);
+	/**
+	 * Inbound mbox has 4 engines, 1 per letter.
+	 * For each message engine that contributes to IRQ state,
+	 * go through all descriptors in queue that have been
+	 * written but not handled.
+	 */
+	while (dme_mask) {
+		struct rio_msg_dme *me;
+		u32 dme_stat;
+		int dme_no = __fls(dme_mask);
+		dme_mask ^= (1 << dme_no);
+		me = priv->ib_dme[dme_no];
+		/**
+		 * Get and clear latched state
+		 */
+		axxia_local_config_read(priv,
+					   RAB_IB_DME_STAT(dme_no), &dme_stat);
+		axxia_local_config_write(priv,
+					    RAB_IB_DME_STAT(dme_no), dme_stat);
+		if (!me)
+			continue;
+
+		mbox_no = me->mbox;
+		letter = me->letter;
+		if (!(dme_stat & 0xff))
+			continue;
+
+		if (dme_stat & (IB_DME_STAT_DESC_XFER_CPLT |
+				IB_DME_STAT_DESC_CHAIN_XFER_CPLT)) {
+			if (mport->inb_msg[mbox_no].mcback)
+				mport->inb_msg[mbox_no].mcback(mport,
+					me->dev_id, mbox_no, letter);
+		}
+
+		if (dme_stat & IB_DME_STAT_ERROR_MASK) {
+			if (dme_stat & (IB_DME_STAT_DESC_UPDATE_ERR |
+					IB_DME_STAT_DESC_ERR |
+					IB_DME_STAT_DESC_FETCH_ERR))
+				dev_err(priv->dev,
+				"IB Mbox%d Letter%d DME%d: Descriptor Error\n",
+						mbox_no, letter, dme_no);
+
+			if (dme_stat & IB_DME_STAT_DATA_TRANS_ERR)
+				dev_err(priv->dev,
+				"IB Mbox%d Letter%d DME%d: Transaction Error\n",
+						mbox_no, letter, dme_no);
+
+			if (dme_stat & IB_DME_STAT_MSG_ERR)
+				dev_err(priv->dev,
+				"IB MBOX%d Letter%d DME%d: Message Error\n",
+						mbox_no, letter, dme_no);
+
+			if (dme_stat & (IB_DME_STAT_MSG_TIMEOUT))
+				dev_err(priv->dev,
+				"IB MBOX%d Letter%d DME%d: SRIO Timeout\n",
+						mbox_no, letter, dme_no);
+		}
+
+	}
+	goto ib_dme_restart;
+}
+/**
+ * open_inb_mbox - Initialize AXXIA inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open 0..(MID-1),
+ *            0..3 multi segment,
+ *            4..(MID-1) single segment
+ * @entries: Number of entries in the inbound mailbox ring
+ *
+ * Initializes buffer ring.  Sets up descriptor ring and memory
+ * for messages for all 4 letters in the mailbox.  [This means
+ * that the actual descriptor requirements are "4 * entries".]
+ *
+ * Returns %0 on success and %-EINVAL or %-ENOMEM on failure.
+ */
+static int open_inb_mbox(struct rio_mport *mport, void *dev_id,
+			 int mbox, int entries)
+{
+	struct rio_priv *priv = mport->priv;
+	struct rio_irq_handler *h = NULL;
+	int i, letter;
+	int rc, buf_sz;
+	u32 irq_state_mask = 0;
+	u32 dme_ctrl;
+	struct rio_rx_mbox *mb;
+	int j;
+
+	if ((mbox < 0) || (mbox >= RIO_MAX_RX_MBOX))
+		return -EINVAL;
+
+	if ((entries < 2) || (entries > priv->desc_max_entries))
+		return -EINVAL;
+	h = &priv->ib_dme_irq;
+
+	if (priv->ib_mbox[mbox] != NULL)
+		return -EBUSY;
+
+	buf_sz = RIO_MBOX_TO_BUF_SIZE(mbox);
+
+	mb = kzalloc(sizeof(*mb), GFP_KERNEL);
+	if (!mb)
+		return -ENOMEM;
+	mb->mbox_no = mbox;
+
+	kref_init(&mb->kref);
+	/* Add 1 to entries to ensure the presence of an invalid descriptor
+	 * in the circular buffer, to avoid the hardware getting into an
+	 * indefinite loop.
+	 */
+	entries = entries + 1;
+	/* Round up to a power of two for efficient handling */
+	entries = roundup_pow_of_two(entries);
+	dev_dbg(priv->dev, "Opening inbound mbox %d with %d entries\n",
+							mbox, entries);
+	/**
+	 *  Initialize rx buffer ring
+	 */
+	mb->mport = mport;
+	mb->ring_size = entries;
+	for (i = 0; i < RIO_MSG_MAX_LETTER; i++) {
+		mb->virt_buffer[i] = kzalloc(mb->ring_size * sizeof(void *),
+								GFP_KERNEL);
+		if (!mb->virt_buffer[i]) {
+			kfree(mb);
+			return -ENOMEM;
+		}
+		mb->last_rx_slot[i] = 0;
+		mb->next_rx_slot[i] = 0;
+		for (j = 0; j < mb->ring_size; j++)
+			mb->virt_buffer[i][j] = NULL;
+	}
+
+	/**
+	 * Since we don't have the definition of letter in the generic
+	 * RIO layer, we set up IB mailboxes for all letters for each
+	 * mailbox.
+	 */
+	for (letter = 0; letter < RIO_MSG_MAX_LETTER; ++letter) {
+		int dme_no = 0;
+		struct rio_msg_dme *me = NULL;
+		struct rio_msg_desc *desc;
+		u32 dw0, dw1, dw2, dw3;
+		u64 desc_chn_start, desc_addr;
+		u32 dme_stat, wait = 0;
+		u32 buffer_size = (buf_sz > 256 ? 3 : 0);
+
+		/* Search for a free DME, so we can more efficiently map
+		 * them to all of the mbox/letter combinations. */
+		for (i = 0, rc = -1;
+		     i < (priv->num_inb_dmes[0]+priv->num_inb_dmes[1]);
+		     i++) {
+			rc = check_dme(i, &priv->num_inb_dmes[0],
+				&priv->inb_dmes_in_use[0], &priv->inb_dmes[0]);
+			if (rc == 0) {
+				dme_no = i;
+				break;
+			}
+		}
+		if (rc < 0)
+			return rc;
+
+		me = alloc_message_engine(mport,
+					  dme_no,
+					  dev_id,
+					  buf_sz,
+					  entries);
+		if (IS_ERR(me)) {
+			rc = PTR_ERR(me);
+			goto err;
+		}
+
+		irq_state_mask |= (1 << dme_no);
+
+		do {
+			axxia_local_config_read(priv,
+						   RAB_IB_DME_STAT(me->dme_no),
+						   &dme_stat);
+			if (wait++ > 100) {
+				rc = -EBUSY;
+				goto err;
+			}
+		} while (dme_stat & IB_DME_STAT_TRANS_PEND);
+
+		mb->me[letter] = me;
+
+		dw0 = ((buffer_size & 0x3) << 4) |
+		      DME_DESC_DW0_EN_INT;
+			/*Valid bit will be set in add_inb_buffer*/
+
+		dw1 = DME_DESC_DW1_XMBOX(mbox) |
+		      DME_DESC_DW1_MBOX(mbox)  |
+		      DME_DESC_DW1_LETTER(letter);
+		dw3 = 0;		/* 0 means, next contiguous addr
+					 * Also next desc valid bit in dw0
+					 * must be zero. */
+		for (i = 0, desc = me->desc; i < entries; i++, desc++) {
+			if (!priv->intern_msg_desc) {
+				/* Reference AXX5500 Peripheral Subsystem
+				 * Multicore Reference Manual, January 2013,
+				 * Chapter 5, p. 584 */
+				dw1 |= 0;
+				dw2  = (u32)(desc->msg_phys >> 8) & 0x3fffffff;
+				*((u32 *)DESC_TABLE_W0_MEM(me,
+						 i)) = dw0;
+				*((u32 *)DESC_TABLE_W1_MEM(me,
+						 i)) = dw1;
+				*((u32 *)DESC_TABLE_W2_MEM(me,
+						 i)) = dw2;
+				*((u32 *)DESC_TABLE_W3_MEM(me,
+						 i)) = dw3;
+			} else {
+				dw1 |= 0;
+				dw2  = (u32)(desc->msg_phys >> 8) & 0x3fffffff;
+				__rio_local_write_config_32(mport,
+					DESC_TABLE_W0(me->dres.start+i), dw0);
+				__rio_local_write_config_32(mport,
+					DESC_TABLE_W1(me->dres.start+i), dw1);
+				__rio_local_write_config_32(mport,
+					DESC_TABLE_W2(me->dres.start+i), dw2);
+				__rio_local_write_config_32(mport,
+					DESC_TABLE_W3(me->dres.start+i), dw3);
+			}
+		}
+
+		/**
+		 * Last descriptor - make ring.
+		 * Next desc table entry -> dw2.First desc address[37:36].
+		 *                       -> dw3.First desc address[35:4].
+		 * (desc_base + 0x10 * nr)
+		 */
+		desc--; i--;
+		dw0 |= DME_DESC_DW0_NXT_DESC_VALID;
+		dw0 &= ~DME_DESC_DW0_VALID;
+		if (!priv->intern_msg_desc) {
+			desc_chn_start =
+				(uintptr_t)virt_to_phys(me->descriptors);
+
+			dw2  = *((u32 *)DESC_TABLE_W2_MEM(me, i));
+			dw2 |= (desc_chn_start >> 4) & 0xc0000000;
+			dw3  = desc_chn_start >> 4;
+			*((u32 *)DESC_TABLE_W0_MEM(me, i)) = dw0;
+			*((u32 *)DESC_TABLE_W2_MEM(me, i)) = dw2;
+			*((u32 *)DESC_TABLE_W3_MEM(me, i)) = dw3;
+		} else {
+			desc_chn_start = DESC_TABLE_W0(me->dres.start);
+
+			__rio_local_read_config_32(mport,
+					    DESC_TABLE_W2(me->dres.start+i),
+					    &dw2);
+			dw3  = 0;
+			dw2 |= ((desc_chn_start >> 8) & 0xc0000000);
+			__rio_local_write_config_32(mport,
+						DESC_TABLE_W0(me->dres.start+i),
+						dw0);
+			__rio_local_write_config_32(mport,
+						DESC_TABLE_W2(me->dres.start+i),
+						dw2);
+			__rio_local_write_config_32(mport,
+						DESC_TABLE_W3(me->dres.start+i),
+						dw3);
+		}
+
+		/**
+		 * Setup the DME including descriptor chain start address
+		 */
+		dme_ctrl = RAB_IB_DME_CTRL_XMBOX(mbox)    |
+			   RAB_IB_DME_CTRL_MBOX(mbox)     |
+			   RAB_IB_DME_CTRL_LETTER(letter) |
+			   DME_WAKEUP                     |
+			   DME_ENABLE;
+		dme_ctrl |= (u32)((desc_chn_start >> 6) & 0xc0000000);
+		desc_addr  = (u32)desc_chn_start >> 4;
+
+		me->dme_ctrl = dme_ctrl;
+		me->letter = letter;
+		me->mbox = mbox;
+		priv->ib_dme[dme_no] = me;
+
+		axxia_local_config_write(priv,
+					RAB_IB_DME_DESC_ADDR(dme_no),
+					desc_addr);
+		axxia_local_config_write(priv,
+					RAB_IB_DME_CTRL(dme_no), dme_ctrl);
+
+#ifdef CONFIG_AXXIA_RIO_STAT
+		me->desc_done_count = 0;
+		me->desc_error_count = 0;
+		me->desc_rio_err_count = 0;
+		me->desc_axi_err_count = 0;
+		me->desc_tmo_err_count = 0;
+#endif
+		select_dme(dme_no, &priv->num_inb_dmes[0],
+			&priv->inb_dmes_in_use[0], &priv->inb_dmes[0], 1);
+	}
+
+	/*
+	 * Create irq handler and enable MBOX irq
+	 */
+
+	mb->irq_state_mask = irq_state_mask;
+	h->irq_state_mask |= irq_state_mask;
+	priv->ib_mbox[mbox] = mb;
+	AXXIA_RIO_SYSMEM_BARRIER();
+	axxia_local_config_write(priv, RAB_INTR_STAT_IDME, irq_state_mask);
+
+	if (priv->dme_mode == AXXIA_IBDME_TIMER_MODE) {
+		hrtimer_init(&mb->tmr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		mb->tmr.function = ib_dme_tmr_handler;
+		hrtimer_start(&mb->tmr, ktime_set(0, (axxia_hrtimer_delay)),
+					HRTIMER_MODE_REL_PINNED);
+	} else
+		axxia_local_config_write(priv, RAB_INTR_ENAB_IDME,
+						h->irq_state_mask);
+	return 0;
+
+err:
+	mbox_put(mb);
+	return rc;
+}
+
+/**
+ * release_inb_mbox - Close AXXIA inbound mailbox
+ * @h: Interrupt handler for the inbound mailbox
+ *
+ * Caller must hold RAB lock.
+ * Releases all resources, i.e. DMEs, descriptors, buffers, and so on.
+ */
+static void release_inb_mbox(struct rio_irq_handler *h)
+{
+	struct rio_rx_mbox *mb = h->data;
+/*TODO*/
+	h->data = NULL;
+	mbox_put(mb);
+}
+
+void axxia_rio_port_get_state(struct rio_mport *mport, int cleanup)
+{
+	struct rio_priv *priv = mport->priv;
+	u32 escsr, iecsr, state;
+
+	if (cleanup) {
+#if defined(CONFIG_AXXIA_RIO_STAT)
+		reset_state_counters(priv);
+#endif
+		/**
+		 * Clear latched state indications
+		 */
+		/* Miscellaneous Events */
+		axxia_local_config_read(priv, RAB_INTR_STAT_MISC, &state);
+		axxia_local_config_write(priv, RAB_INTR_STAT_MISC, state);
+		/* Outbound Message Engine */
+		axxia_local_config_read(priv, RAB_INTR_STAT_ODME, &state);
+		axxia_local_config_write(priv, RAB_INTR_STAT_ODME, state);
+		/* Inbound Message Engine */
+		axxia_local_config_read(priv, RAB_INTR_STAT_IDME, &state);
+		axxia_local_config_write(priv, RAB_INTR_STAT_IDME, state);
+		/* Axxia Bus to RIO Events */
+		axxia_local_config_read(priv, RAB_INTR_STAT_APIO, &state);
+		axxia_local_config_write(priv, RAB_INTR_STAT_APIO, state);
+		/* RIO to Axxia Bus Events */
+		axxia_local_config_read(priv, RAB_INTR_STAT_RPIO, &state);
+		axxia_local_config_write(priv, RAB_INTR_STAT_RPIO, state);
+	}
+
+	/* Master Port state */
+	axxia_local_config_read(priv, RIO_ESCSR(priv->port_ndx), &escsr);
+	axxia_local_config_read(priv, EPC_IECSR(priv->port_ndx), &iecsr);
+
+	/* Write back I2E to preserve the idle sequence select bit (R/W) */
+	axxia_local_config_write(priv, RIO_ESCSR(priv->port_ndx),
+				(escsr & (RIO_ESCSR_I2E | RIO_EXCSR_WOLR)));
+}
+
+/**
+ * RIO MPORT Driver API
+ */
+
+/**
+ * axxia_rio_port_irq_enable - Register RIO interrupt handlers
+ *
+ * @mport: master port
+ *
+ * Caller must hold RAB lock
+ *
+ * Returns:
+ * 0        Success
+ * <0       Failure
+ */
+int axxia_rio_port_irq_enable(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	int rc;
+
+	priv->dme_mode = axxia_dme_tmr_mode[priv->ndx];
+	/**
+	 * Clean up history
+	 * from port reset/restart
+	 */
+	axxia_rio_port_get_state(mport, 1);
+	rc = alloc_irq_handler(&priv->misc_irq, priv, "rio-misc-db");
+	if (rc)
+		goto out;
+
+	rc = alloc_irq_handler(&priv->apio_irq, priv, "rio-apio");
+	if (rc)
+		goto err2;
+
+	rc = alloc_irq_handler(&priv->rpio_irq, priv, "rio-rpio");
+	if (rc)
+		goto err3;
+	priv->ib_dme_irq.data = priv;
+	priv->ob_dme_irq.data = priv;
+
+	if (priv->dme_mode == AXXIA_IBDME_INTERRUPT_MODE) {
+		rc = request_threaded_irq(priv->irq_line,
+				hw_irq_dme_handler, NULL,
+				IRQF_TRIGGER_NONE | IRQF_SHARED,
+				"rio-mb", (void *)priv);
+		if (rc)
+			goto err4;
+
+		axxia_local_config_write(priv, RAB_INTR_ENAB_GNRL,
+				    (RAB_INTR_ENAB_GNRL_SET | IB_DME_INT_EN));
+	} else
+		axxia_local_config_write(priv, RAB_INTR_ENAB_GNRL,
+				    RAB_INTR_ENAB_GNRL_SET);
+out:
+	return rc;
+err0:
+	dev_warn(priv->dev, "RIO: unable to request irq.\n");
+	goto out;
+err4:
+	release_irq_handler(&priv->rpio_irq);
+err3:
+	release_irq_handler(&priv->apio_irq);
+err2:
+	release_irq_handler(&priv->misc_irq);
+	goto err0;
+}
+
+void axxia_rio_port_irq_disable(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	/**
+	 * Mask top level IRQs
+	 */
+	axxia_local_config_write(priv, RAB_INTR_ENAB_GNRL, 0);
+	/**
+	 * free registered handlers
+	 */
+	release_irq_handler(&priv->misc_irq);
+	release_irq_handler(&priv->ob_dme_irq);
+	release_irq_handler(&priv->ib_dme_irq);
+	release_irq_handler(&priv->apio_irq);
+	release_irq_handler(&priv->rpio_irq);
+}
+
+int axxia_rio_pw_enable(struct rio_mport *mport, int enable)
+{
+	struct rio_priv *priv = mport->priv;
+	int rc = 0;
+
+	mutex_lock(&priv->api_lock);
+	if (enable)
+		rc = enable_pw(mport);
+	else
+		disable_pw(mport);
+	mutex_unlock(&priv->api_lock);
+
+	return rc;
+}
+
+/**
+ * axxia_rio_doorbell_send - Send a doorbell message
+ *
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of target device
+ * @data: 16-bit info field of RapidIO doorbell message
+ *
+ * Sends a doorbell message.
+ *
+ * Returns %0 on success or %-EBUSY if no outbound doorbell register is free.
+ *
+ * API protected by spin lock in generic rio driver.
+ */
+int axxia_rio_doorbell_send(struct rio_mport *mport,
+			    int index, u16 destid, u16 data)
+{
+	struct rio_priv *priv = mport->priv;
+	int db;
+	u32 csr;
+
+	for (db = 0; db < MAX_OB_DB; db++) {
+		axxia_local_config_read(priv, RAB_OB_DB_CSR(db), &csr);
+		if (OB_DB_STATUS(csr) == OB_DB_STATUS_DONE) {
+
+			csr = 0;
+			csr |= OB_DB_DEST_ID(destid);
+			csr |= OB_DB_PRIO(0x2); /* Good prio? */
+			csr |= OB_DB_SEND;
+			dev_dbg(priv->dev,
+			   "Send doorbell 0x%04x to destid 0x%x\n",
+				data, destid);
+			axxia_local_config_write(priv, RAB_OB_DB_INFO(db),
+						    OB_DB_INFO(data));
+			axxia_local_config_write(priv, RAB_OB_DB_CSR(db),
+						    csr);
+			break;
+		}
+	}
+	if (db == MAX_OB_DB)
+		return -EBUSY;
+
+	return 0;
+}
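+
+/*
+ * Usage sketch (illustrative, not part of this driver): clients normally
+ * reach this handler through the generic RapidIO API rather than calling
+ * it directly, e.g.
+ *
+ *	err = rio_send_doorbell(rdev, 0xbeef);
+ *
+ * The generic layer resolves the mport and serializes doorbell access,
+ * which is why no extra locking is taken here.
+ */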
+
+/************/
+/* OUTBOUND */
+/************/
+/**
+ * axxia_open_outb_mbox - Initialize AXXIA outbound mailbox
+ * @mport: Master port implementing the outbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox_dme: Mailbox to open
+ * @entries: Number of entries in the outbound DME/mailbox ring for
+ *           each letter
+ *
+ * Allocates and initializes descriptors.
+ * We have N (e.g. 3) outbound mailboxes and M (e.g. 1024) message
+ * descriptors.  The message descriptors are usable by inbound and
+ * outbound message queues, at least until the point of binding.
+ * Allocation/Distribution of message descriptors is flexible and
+ * not restricted in any way other than that they must be uniquely
+ * assigned/coherent to each mailbox/DME.
+ *
+ * Allocate memory for messages.
+ * Each descriptor can hold a message of up to 4kB, though certain
+ * DMEs or mailboxes may impose further limits on the size of the
+ * messages.
+ *
+ * Returns %0 on success and %-EINVAL or %-ENOMEM on failure.
+ */
+int axxia_open_outb_mbox(
+	struct rio_mport *mport,
+	void *dev_id,
+	int mbox_dme,
+	int entries/*,
+	int prio*/)
+{
+	struct rio_priv *priv = mport->priv;
+	int rc = 0;
+
+	mutex_lock(&priv->api_lock);
+	rc = open_outb_mbox_static(mport, dev_id, mbox_dme,
+					entries, 0x0/*prio*/);
+	mutex_unlock(&priv->api_lock);
+
+	return rc;
+}
+
+/**
+ * axxia_close_outb_mbox - Shut down AXXIA outbound mailbox
+ *
+ * @mport: Master port implementing the outbound message unit
+ * @mbox_id: Mailbox to close
+ *
+ * Disables the outbound message unit, frees all buffers, and
+ * frees any other resources.
+ */
+void axxia_close_outb_mbox(struct rio_mport *mport, int mbox_id)
+{
+	struct rio_priv *priv = mport->priv;
+	int dme_no;
+	int wait_cnt = 0;
+	struct rio_msg_dme *me;
+	struct rio_tx_mbox *mb = NULL;
+
+
+	if ((mbox_id < 0) ||
+	    (mbox_id >= RIO_MAX_TX_MBOX))
+		return;
+	mb = priv->ob_mbox[mbox_id];
+	if ((!mb) ||
+	    (!test_bit(RIO_MB_OPEN, &mb->state)))
+		return;
+	me = mb->me;
+
+	mutex_lock(&priv->api_lock);
+	clear_bit(RIO_MB_OPEN, &priv->ob_mbox[mbox_id]->state);
+	while (me->write_idx != me->read_idx) {
+		msleep(20);
+		wait_cnt++;
+		if (wait_cnt > 250)
+			break;
+	}
+	if (wait_cnt > 250)
+		pr_debug("Closed outb mbox%d while transactions pending\n",
+								mbox_id);
+	priv->ob_mbox[mbox_id] = NULL;
+	dme_no = mb->dme_no;
+	mb->dme_no = 0xff;
+
+	priv->ob_dme_shared[dme_no].ring_size -=
+		mb->ring_size;
+
+	priv->ob_dme_shared[dme_no].ring_size_free +=
+		mb->ring_size;
+	mb->dev_id = NULL;
+	clear_bit(RIO_MB_MAPPED, &mb->state);
+	kfree(mb);
+	me->sz--;
+	if (!(me->sz))
+		hrtimer_cancel(&priv->ob_dme_shared[dme_no].tmr);
+	mutex_unlock(&priv->api_lock);
+}
+
+static inline struct rio_msg_desc *get_ob_desc(struct rio_mport *mport,
+						struct rio_msg_dme *mb)
+{
+	int desc_num = mb->write_idx;
+	struct rio_priv *priv = mport->priv;
+	struct rio_msg_desc *desc = &mb->desc[desc_num];
+	int nxt_write_idx = (mb->write_idx + 1) & (mb->entries - 1);
+	u32 dw0;
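+
+	/* The descriptor ring size is a power of two, so masking with
+	 * (entries - 1) wraps the index without a modulo; one slot is kept
+	 * unused so that write_idx == read_idx means "empty", not "full". */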
+	if (nxt_write_idx != mb->read_idx) {
+		dw0 = *((u32 *)DESC_TABLE_W0_MEM(mb, desc_num));
+		if (!(dw0 & DME_DESC_DW0_VALID))
+			return desc;
+		else
+			dev_err(priv->dev, "Tx Desc error %d\n", mb->write_idx);
+	}
+	return NULL;
+}
+
+/**
+ * axxia_add_outb_message - Add message to the AXXIA outbound message queue
+ * --- Called in net core soft IRQ with local interrupts masked ---
+ * --- And spin locked in master port net device handler        ---
+ *
+ * @mport: Master port with outbound message queue
+ * @rdev: Target of outbound message
+ * @mbox_dest: Destination mailbox
+ * @letter: TID letter
+ * @flags: 3-bit field, Critical Request Field[2] | Prio[1:0]
+ * @buffer: Message to add to outbound queue
+ * @len: Length of message
+ *
+ * Adds the @buffer message to the AXXIA outbound message queue.
+ * Returns %0 on success
+ *         %-EBUSY  on temporarily unavailable resource failure e.g. such
+ *                     as waiting for an open entry in the outbound DME
+ *                     descriptor chain
+ *         %-EAGAIN on another kind of temporarily unavailable resource
+ *                     failure
+ *         %-EINVAL on invalid argument failure.
+ *         %-ENODEV on unavailable resource failure e.g. no outbound DME
+ *                     open that matches the kind of destination mailbox
+ *         %-ENXIO  on incompatible argument failure e.g. trying to open
+ *                     a single-segment mbox when none are available on
+ *                     the platform
+ */
+int axxia_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev,
+			     int mbox_dest, int letter, int flags,
+			     void *buffer, size_t len/*, void *cookie*/)
+{
+	int rc = 0;
+	u32 dw0, dw1;
+	u16 destid = (rdev ? rdev->destid : mport->host_deviceid);
+	struct rio_priv *priv = mport->priv;
+	struct rio_tx_mbox *mb = priv->ob_mbox[mbox_dest];
+	struct rio_msg_dme *me;
+	struct rio_msg_desc *desc;
+	u32 dw2_r, dw2;
+	u32 idx;
+	u32 seg;
+	u32 lock = 0;
+	u32 cp = 0;
+
+	if (!mb)
+		return -EINVAL;
+	me = mb->me;
+	if (me->sz > 1)
+		lock = 1;
+
+	/* Choose a free descriptor in a critical section */
+	if (lock)
+		spin_lock(&me->lock);
+	desc = get_ob_desc(mport, me);
+	if (!desc) {
+		rc = -EBUSY;
+		goto done;
+	}
+
+
+	/* Bounce unaligned kernel buffers and user buffers through
+	 * desc->msg_virt; 256-byte aligned kernel buffers go zero-copy */
+	if ((u32)buffer > PAGE_OFFSET) {
+		if ((u32)buffer & 0xFF) {
+			if (unlikely(desc->msg_virt == NULL)) {
+				rc = -ENXIO;
+				goto done;
+			}
+			memcpy(desc->msg_virt, buffer, len);
+			cp = 1;
+		}
+	} else {
+		if (copy_from_user(desc->msg_virt, buffer, len)) {
+			rc = -ENXIO;
+			goto done;
+		}
+		cp = 1;
+	}
+
+	dw0 = DME_DESC_DW0_SRC_DST_ID(destid) |
+	/*	DME_DESC_DW0_EN_INT|*/
+		DME_DESC_DW0_VALID;
+
+#if 0
+	if (!(me->write_idx % 4))
+		dw0 |=	DME_DESC_DW0_EN_INT;
+#endif
+	dw0 |= desc->last; /* DME_DESC_DW0_NXT_DESC_VALID */
+	seg = len;
+	if (seg < 256)
+		seg = 256;
+	seg = roundup_pow_of_two(seg) >> 7;
+	dw1 = DME_DESC_DW1_PRIO(flags) |
+		DME_DESC_DW1_CRF(flags) |
+		(fls(seg)<<18) |
+		DME_DESC_DW1_MSGLEN(len) |
+		DME_DESC_DW1_XMBOX(mbox_dest) |
+		DME_DESC_DW1_MBOX(mbox_dest) |
+		DME_DESC_DW1_LETTER(letter);
+	idx = me->write_idx;
+	dw2_r  = *((u32 *)DESC_TABLE_W2_MEM(me, idx));
+	if (cp)
+		dw2 = (u32)(desc->msg_phys >> 8) & 0x3fffffff;
+	else
+		dw2 = (u32)(virt_to_phys(buffer) >> 8) & 0x3fffffff;
+	dw2 = (dw2_r & 0xc0000000) | dw2;
+	me->write_idx = (me->write_idx+1) & (me->entries - 1);
+	*((u32 *)DESC_TABLE_W2_MEM(me, idx)) = dw2;
+	*((u32 *)DESC_TABLE_W1_MEM(me, idx)) = dw1;
+	AXXIA_RIO_SYSMEM_BARRIER();
+	*((u32 *)DESC_TABLE_W0_MEM(me, idx)) = dw0;
+
+	if (lock)
+		spin_unlock(&me->lock);
+	else
+		AXXIA_RIO_SYSMEM_BARRIER();
+	/* Start / Wake up - the stored state is used to avoid a Read */
+	axxia_local_config_write(priv, RAB_OB_DME_CTRL(me->dme_no),
+							me->dme_ctrl);
+
+#ifdef CONFIG_AXXIA_RIO_STAT
+	priv->ob_mbox[mbox_dest]->sent_msg_count++;
+#endif
+	return rc;
+done:
+	if (lock)
+		spin_unlock(&me->lock);
+	return rc;
+}
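+
+/*
+ * Usage sketch (illustrative): an upper layer such as rionet queues a
+ * frame through the generic RapidIO messaging API,
+ *
+ *	rio_add_outb_message(mport, rdev, mbox, skb->data, skb->len);
+ *
+ * which reaches this routine through the mport ops via the
+ * axxia_ml_add_outb_message() wrapper below. Kernel buffers that are
+ * 256-byte aligned take the zero-copy path above; anything else is
+ * bounced through desc->msg_virt.
+ */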
+
+int axxia_ml_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev,
+			     int mbox_dest, void *buffer, size_t len)
+{
+	return axxia_add_outb_message(mport, rdev, mbox_dest, 0, 0, buffer,
+						len/*, NULL*/);
+}
+
+/**
+ * axxia_open_inb_mbox - Initialize AXXIA inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the inbound mailbox ring
+ *
+ * Initializes buffer ring.  Set up descriptor ring and memory
+ * for messages for all letters in the mailbox.
+ * Returns %0 on success and %-EINVAL or %-ENOMEM on failure.
+ */
+int axxia_open_inb_mbox(struct rio_mport *mport, void *dev_id,
+			int mbox, int entries)
+{
+	struct rio_priv *priv = mport->priv;
+	int rc = 0;
+
+	mutex_lock(&priv->api_lock);
+	rc = open_inb_mbox(mport, dev_id, mbox, entries);
+	mutex_unlock(&priv->api_lock);
+
+	return rc;
+}
+
+/**
+ * axxia_close_inb_mbox - Shut down AXXIA inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Mailbox to close
+ *
+ * Disables the inbound message unit, frees all buffers, and
+ * releases resources.
+ */
+void axxia_close_inb_mbox(struct rio_mport *mport, int mbox)
+{
+	struct rio_priv *priv = mport->priv;
+	struct rio_rx_mbox *mb;
+	u32 letter;
+	u32 dme_stat;
+	u32 dme_no;
+
+	if ((mbox < 0) || (mbox >= RIO_MAX_RX_MBOX))
+		return;
+	mutex_lock(&priv->api_lock);
+	mb = priv->ib_mbox[mbox];
+	if (mb == NULL) {
+		mutex_unlock(&priv->api_lock);
+		return;
+	}
+	priv->ib_dme_irq.irq_state_mask &= ~(mb->irq_state_mask);
+	axxia_local_config_write(priv, RAB_INTR_ENAB_IDME,
+					priv->ib_dme_irq.irq_state_mask);
+	axxia_local_config_write(priv, RAB_INTR_STAT_IDME, mb->irq_state_mask);
+	msleep(500);
+	priv->ib_mbox[mbox] = NULL;
+	for (letter = 0; letter < RIO_MSG_MAX_LETTER; letter++) {
+		int wait = 0;
+		if (mb->me[letter]) {
+			dme_no = mb->me[letter]->dme_no;
+			do {
+				axxia_local_config_read(priv,
+					RAB_IB_DME_STAT(dme_no), &dme_stat);
+				if (wait++ > 10000)
+					break;
+			} while (dme_stat & IB_DME_STAT_TRANS_PEND);
+			if (wait > 10000)
+				dev_err(priv->dev,
+					"Closing while Transaction pending\n");
+			axxia_local_config_write(priv,
+					RAB_IB_DME_CTRL(dme_no), 0);
+		}
+	}
+	axxia_local_config_write(priv, RAB_INTR_STAT_IDME, mb->irq_state_mask);
+	mb->irq_state_mask = 0;
+	msleep(100);
+	if (priv->dme_mode == AXXIA_IBDME_TIMER_MODE)
+		hrtimer_cancel(&mb->tmr);
+	mbox_put(mb);
+	mutex_unlock(&priv->api_lock);
+}
+
+/**
+ * axxia_add_inb_buffer - Add buffer to the AXXIA inbound message queue
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Inbound mailbox number
+ * @buf: Buffer to add to inbound queue
+ *
+ * Adds the @buf buffer to the AXXIA inbound message queue.
+ *
+ * Returns %0 on success
+ *         %-EINVAL on invalid argument failure.
+ *         %-EBUSY  on temporarily unavailable resource failure e.g. such
+ *                     as waiting for a filled entry in the inbound DME
+ *                     descriptor chain
+ */
+int axxia_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
+{
+	struct rio_priv *priv = mport->priv;
+	struct rio_rx_mbox *mb;
+	int rc = 0;
+	struct rio_msg_dme *me;
+	struct rio_msg_desc *desc;
+	u32 dw0, dw2, dw2_r;
+
+	mb = (priv->ib_mbox[mbox]);
+	if (!mb)
+		return -EINVAL;
+	me = mb->me[0];
+	/* Lockless circular buffer scheme */
+	if (((me->write_idx + 1) & (me->entries - 1)) == me->read_idx)
+		goto busy;
+	if (mb->virt_buffer[0][me->write_idx]) {
+		/* TODO Need to handle this case when DME encounters error */
+		goto busy;
+	}
+
+	dw0 = *((u32 *)DESC_TABLE_W0_MEM(me, me->write_idx));
+	if (dw0 & DME_DESC_DW0_VALID) {
+		dev_dbg(priv->dev, "Filling an already valid buffer %d %x\n",
+							 me->write_idx, dw0);
+		goto busy;
+	}
+	mb->virt_buffer[0][me->write_idx] = buf;
+	if (!((u32)buf & 0xFF)) {
+		dw2_r = *((u32 *)DESC_TABLE_W2_MEM(me, me->write_idx));
+		dw2 = (u32)(virt_to_phys(buf) >> 8) & 0x3fffffff;
+		dw2 = (dw2_r & 0xc0000000) | dw2;
+		*((u32 *)DESC_TABLE_W2_MEM(me, me->write_idx)) = dw2;
+	} else {
+		desc = &me->desc[me->write_idx];
+		dw2_r = *((u32 *)DESC_TABLE_W2_MEM(me, me->write_idx));
+		dw2 = (u32)(desc->msg_phys >> 8) & 0x3fffffff;
+		dw2 = (dw2_r & 0xc0000000) | dw2;
+		*((u32 *)DESC_TABLE_W2_MEM(me, me->write_idx)) = dw2;
+	}
+
+	AXXIA_RIO_SYSMEM_BARRIER();
+	dw0 |= DME_DESC_DW0_VALID;
+	*((u32 *)DESC_TABLE_W0_MEM(me, me->write_idx)) = dw0;
+	AXXIA_RIO_SYSMEM_BARRIER();
+	me->write_idx = (me->write_idx + 1) & (me->entries - 1);
+/*	axxia_local_config_read(priv,
+		RAB_IB_DME_CTRL(me->dme_no), &dme_ctrl);
+	dme_ctrl |= (DME_WAKEUP | DME_ENABLE);*/
+	axxia_local_config_write(priv,
+		RAB_IB_DME_CTRL(me->dme_no), me->dme_ctrl);
+done:
+	return rc;
+busy:
+	rc = -EBUSY;
+	goto done;
+}
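+
+/*
+ * Usage sketch (illustrative): receive buffers are typically replenished
+ * from the inbound event handler through the generic API, e.g.
+ *
+ *	while (rio_add_inb_buffer(mport, mbox, buf) == 0)
+ *		buf = next_free_buffer();    (hypothetical helper)
+ *
+ * -EBUSY from this routine means the ring is full; the caller should
+ * retry after messages have been consumed.
+ */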
+
+/**
+ * axxia_get_inb_message - Fetch an inbound message from the AXXIA
+ *                         message unit
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Inbound mailbox number
+ * @letter: Inbound mailbox letter
+ * @sz: size of returned buffer
+ *
+ * Gets the next available inbound message from the inbound message queue.
+ *
+ * Returns pointer to the message on success, or
+ *         NULL when nothing is available or a buffer error occurred
+ */
+void *axxia_get_inb_message(struct rio_mport *mport, int mbox, int letter,
+			    int *sz/*, int *slot, u16 *destid*/)
+{
+	struct rio_priv *priv = mport->priv;
+	struct rio_rx_mbox *mb;
+	struct rio_msg_dme *me;
+	int num_proc = 0;
+	void *buf = NULL;
+	u32 idx;
+
+	mb = (priv->ib_mbox[mbox]);
+	if (!mb)
+		return NULL;
+	me = (mb->me[letter]);
+	while (1) {
+		struct rio_msg_desc *desc = &me->desc[me->read_idx];
+		u32 dw0, dw1;
+		idx = me->read_idx;
+		buf = NULL;
+		*sz = 0;
+		dw0 = *((u32 *)DESC_TABLE_W0_MEM(me, idx));
+		dw1 = *((u32 *)DESC_TABLE_W1_MEM(me, idx));
+		__dme_dw_dbg(priv->dev, me, 0, dw0, dw1);
+		if ((dw0 & DME_DESC_DW0_ERROR_MASK) &&
+		    (dw0 & DME_DESC_DW0_VALID)) {
+			*((u32 *)DESC_TABLE_W0_MEM(me, idx)) =
+					(dw0 & 0xff) | DME_DESC_DW0_VALID;
+/*TODO Need to check here: May need to keep it valid for nocopy case
+ *Proper Error Handling and add_inb_buffer Required */
+			pr_err("Desc error 0x%x\n", dw0);
+			me->read_idx = (me->read_idx + 1) & (me->entries - 1);
+			num_proc++;
+		} else if ((dw0 & DME_DESC_DW0_DONE) &&
+			   (dw0 & DME_DESC_DW0_VALID)) {
+			int seg, buf_sz;
+			AXXIA_RIO_SYSMEM_BARRIER();
+			seg = DME_DESC_DW1_MSGLEN_F(dw1);
+			buf_sz = DME_DESC_DW1_MSGLEN_B(seg);
+			buf = mb->virt_buffer[letter][me->read_idx];
+			if (!buf) {
+				dev_err(priv->dev, "Buffer Get Error\n");
+				goto err;
+			}
+
+			if ((u32)buf & 0xFF) {
+				AXXIA_RIO_SYSMEM_BARRIER();
+				memcpy(buf, desc->msg_virt, buf_sz);
+			}
+			mb->virt_buffer[letter][me->read_idx] = NULL;
+			*((u32 *)DESC_TABLE_W0_MEM(me, idx)) =
+					(dw0 & 0xfe); /* DME_DESC_INVALIDATE */
+			*sz = buf_sz;
+
+			me->read_idx = (me->read_idx + 1) & (me->entries - 1);
+			num_proc++;
+			goto done;
+		} else {
+			goto done;
+		}
+	}
+
+done:
+	return buf;
+err:
+	buf = NULL;
+	goto done;
+}
+EXPORT_SYMBOL(axxia_get_inb_message);
+
+void *axxia_ml_get_inb_message(struct rio_mport *mport, int mbox)
+{
+	int sz;
+	return axxia_get_inb_message(mport, mbox, 0, &sz);
+}
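+
+/*
+ * Usage sketch (illustrative): a consumer drains the mailbox from its
+ * inbound event callback through the generic API, which maps to the
+ * letter-0 wrapper above:
+ *
+ *	while ((buf = rio_get_inb_message(mport, mbox)) != NULL)
+ *		process(buf);    (process() is a hypothetical consumer)
+ *
+ * NULL means the ring is empty; consumed slots must be handed back with
+ * rio_add_inb_buffer() to keep the DME supplied.
+ */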
+
+void axxia_rio_port_irq_init(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	int i;
+
+	/**
+	 * Port general error indications
+	 */
+	clear_bit(RIO_IRQ_ENABLED, &priv->misc_irq.state);
+	priv->misc_irq.irq_enab_reg_addr = RAB_INTR_ENAB_MISC;
+	priv->misc_irq.irq_state_reg_addr = RAB_INTR_STAT_MISC;
+	priv->misc_irq.irq_state_mask = AMST_INT | ASLV_INT |
+					LINK_REQ_INT;
+	priv->misc_irq.irq_state_mask |= IB_DB_RCV_INT |
+					OB_DB_DONE_INT;
+	priv->misc_irq.irq_state_mask |= PORT_WRITE_INT;
+	priv->misc_irq.irq_state_mask |=
+		GRIO_INT | LL_TL_INT |
+		UNSP_RIO_REQ_INT | UNEXP_MSG_INT;
+
+	priv->misc_irq.thrd_irq_fn = misc_irq_handler;
+	priv->misc_irq.data = NULL;
+	priv->misc_irq.release_fn = misc_release_handler;
+
+
+	/**
+	 * Deadman Monitor status interrupt
+	 */
+	clear_bit(RIO_IRQ_ENABLED, &priv->linkdown_irq.state);
+	priv->linkdown_irq.irq_enab_reg_addr = 0;
+	priv->linkdown_irq.irq_state_reg_addr = RAB_SRDS_STAT1;
+	priv->linkdown_irq.irq_state_mask = RAB_SRDS_STAT1_LINKDOWN_INT;
+	priv->linkdown_irq.thrd_irq_fn = linkdown_irq_handler;
+	priv->linkdown_irq.data = NULL;
+	priv->linkdown_irq.release_fn = NULL;
+
+	/**
+	 * Outbound messages
+	 */
+	clear_bit(RIO_IRQ_ENABLED, &priv->ob_dme_irq.state);
+	priv->ob_dme_irq.irq_enab_reg_addr = RAB_INTR_ENAB_ODME;
+	priv->ob_dme_irq.irq_state_reg_addr = RAB_INTR_STAT_ODME;
+	priv->ob_dme_irq.irq_state_mask = 0;
+/*	priv->ob_dme_irq.thrd_irq_fn = ob_dme_irq_handler;*/
+	priv->ob_dme_irq.data = NULL;
+	priv->ob_dme_irq.release_fn = release_outb_dme;
+
+	for (i = 0; i < RIO_MAX_TX_MBOX; i++)
+		priv->ob_mbox[i] = NULL;
+
+	/* Pre-allocate the outbound DME descriptors */
+	/* Multi-segment DMEs */
+	for (i = 0; i < priv->num_outb_dmes[0]; i++)
+		alloc_ob_dme_shared(priv, &priv->ob_dme_shared[i], i);
+	/* Single-segment DMEs */
+	for (i = priv->num_outb_dmes[0];
+		i < priv->num_outb_dmes[0] + priv->num_outb_dmes[1]; i++) {
+		alloc_ob_dme_shared(priv, &priv->ob_dme_shared[i], i);
+	}
+
+	/**
+	 * Inbound messages
+	 */
+	clear_bit(RIO_IRQ_ENABLED, &priv->ib_dme_irq.state);
+	priv->ib_dme_irq.irq_enab_reg_addr = RAB_INTR_ENAB_IDME;
+	priv->ib_dme_irq.irq_state_reg_addr = RAB_INTR_STAT_IDME;
+	priv->ib_dme_irq.irq_state_mask = 0x0; /* IB_DME_INT_EN */
+	priv->ib_dme_irq.thrd_irq_fn = ib_dme_irq_handler;
+	priv->ib_dme_irq.data = NULL;
+	priv->ib_dme_irq.release_fn = release_inb_mbox;
+
+	for (i = 0; i < DME_MAX_IB_ENGINES; i++)
+		priv->ib_dme[i] = NULL;
+
+	for (i = 0; i < RIO_MAX_RX_MBOX; i++)
+		priv->ib_mbox[i] = NULL;
+	/**
+	 * PIO
+	 * Only when debug config
+	 */
+	clear_bit(RIO_IRQ_ENABLED, &priv->apio_irq.state);
+/*	priv->apio_irq.mport = mport;*/
+	priv->apio_irq.irq_enab_reg_addr = RAB_INTR_ENAB_APIO;
+	priv->apio_irq.irq_state_reg_addr = RAB_INTR_STAT_APIO;
+	priv->apio_irq.irq_state_mask = APIO_TRANS_FAILED;
+#ifdef CONFIG_AXXIA_RIO_STAT
+	priv->apio_irq.irq_state_mask |= APIO_TRANS_COMPLETE;
+#endif
+	priv->apio_irq.thrd_irq_fn = apio_irq_handler;
+	priv->apio_irq.data = NULL;
+	priv->apio_irq.release_fn = NULL;
+
+	clear_bit(RIO_IRQ_ENABLED, &priv->rpio_irq.state);
+	priv->rpio_irq.irq_enab_reg_addr = RAB_INTR_ENAB_RPIO;
+	priv->rpio_irq.irq_state_reg_addr = RAB_INTR_STAT_RPIO;
+	priv->rpio_irq.irq_state_mask = RPIO_TRANS_FAILED;
+#ifdef CONFIG_AXXIA_RIO_STAT
+	priv->rpio_irq.irq_state_mask |= RPIO_TRANS_COMPLETE;
+#endif
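+	/* Note: the assignment below clears the mask bits set just above,
+	 * leaving RPIO event interrupts disabled by default. */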
+	priv->rpio_irq.irq_state_mask = 0;
+	priv->rpio_irq.thrd_irq_fn = rpio_irq_handler;
+	priv->rpio_irq.data = NULL;
+	priv->rpio_irq.release_fn = NULL;
+
+}
+
+#if defined(CONFIG_RAPIDIO_HOTPLUG)
+int axxia_rio_port_notify_cb(struct rio_mport *mport,
+			       int enable,
+			       void (*cb)(struct rio_mport *mport))
+{
+	struct rio_priv *priv = mport->priv;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&priv->port_lock, flags);
+	if (enable) {
+		if (priv->port_notify_cb)
+			rc = -EBUSY;
+		else
+			priv->port_notify_cb = cb;
+	} else {
+		if (priv->port_notify_cb != cb)
+			rc = -EINVAL;
+		else
+			priv->port_notify_cb = NULL;
+	}
+	spin_unlock_irqrestore(&priv->port_lock, flags);
+
+	return rc;
+}
+
+int axxia_rio_port_op_state(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	u32 escsr;
+
+	axxia_local_config_read(priv, RIO_ESCSR(priv->port_ndx), &escsr);
+
+	if (escsr & RIO_ESCSR_PO)
+		return MPORT_STATE_OPERATIONAL;
+	else
+		return MPORT_STATE_DOWN;
+}
+#endif
diff --git a/drivers/rapidio/devices/lsi/axxia-rio-irq.h b/drivers/rapidio/devices/lsi/axxia-rio-irq.h
new file mode 100644
index 0000000..eea9307
--- /dev/null
+++ b/drivers/rapidio/devices/lsi/axxia-rio-irq.h
@@ -0,0 +1,211 @@
+#ifndef __AXXIA_RIO_IRQ_H__
+#define __AXXIA_RIO_IRQ_H__
+
+/**************************sRIO SERDES *****************************/
+#define SRIO_PHY_CONTROL0_OFFSET        (0x200)
+#define APB2SER_SRIO_PHY0_CFG_OFFSET    (0x1e0)
+#define SERDES_CMD0_OFFSET              (0x0)
+#define SERDES_CMD1_OFFSET              (0x4)
+#define SERDES_READDATA0_OFFSET         (0x8)
+#define SERDES_READDATA1_OFFSET         (0xc)
+
+#define SERDES_CMD1_VALID_SHIFT         (31)
+#define SERDES_CMD1_HWRITE_SHIFT        (30)
+#define SERDES_CMD1_TSHIFT_SHIFT        (26)
+#define SERDES_CMD1_HSZIE_SHIFT         (23)
+#define SERDES_CMD1_HTRANS_SHIFT        (21)
+#define SERDES_CMD1_HADDR_MASK          (0xFFFF)
+
+#define SERDES_READDATA1_TMO_SHIFT       (2)
+#define SERDES_READDATA1_HRESP_MASK     (0x3)
+/******************************************************************/
+
+/* forward declaration */
+struct rio_priv;
+
+#define RIO_MSG_MAX_OB_MBOX_MULTI_ENTRIES  15
+#define RIO_MSG_MULTI_SIZE                 0x1000 /* 4KB */
+#define RIO_MSG_SEG_SIZE                   0x0100 /* 256B */
+#define RIO_MSG_MAX_MSG_SIZE               RIO_MSG_MULTI_SIZE
+#define RIO_MSG_MAX_ENTRIES                1024   /* Default Max descriptor
+						     table entries for internal
+						     descriptor builds */
+#define	RIO_MBOX_TO_IDX(mid)		\
+	((mid <= RIO_MAX_RX_MBOX_4KB) ? 0 : 1)
+#define	RIO_MBOX_TO_BUF_SIZE(mid)		\
+	((mid <= RIO_MAX_RX_MBOX_4KB) ? RIO_MSG_MULTI_SIZE : RIO_MSG_SEG_SIZE)
+#define	RIO_OUTB_DME_TO_BUF_SIZE(p, did)	\
+	((did < p->num_outb_dmes[0]) ? RIO_MSG_MULTI_SIZE : RIO_MSG_SEG_SIZE)
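+
+/*
+ * Example: with RIO_MAX_RX_MBOX_4KB == 3, mailboxes 0..3 resolve to
+ * index 0 (multi-segment, 4KB buffers) and mailboxes 4..63 resolve to
+ * index 1 (single-segment, 256B buffers).
+ */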
+
+#define DME_MAX_IB_ENGINES          32
+#define     RIO_MAX_IB_DME_MSEG		32
+#define     RIO_MAX_IB_DME_SSEG	        0
+#define DME_MAX_OB_ENGINES          3
+#define     RIO_MAX_OB_DME_MSEG		2
+#define     RIO_MAX_OB_DME_SSEG	        1
+
+#define RIO_MAX_TX_MBOX             64
+#define     RIO_MAX_TX_MBOX_4KB		3
+#define     RIO_MAX_TX_MBOX_256B	63
+#define RIO_MAX_RX_MBOX             64
+#define     RIO_MAX_RX_MBOX_4KB		3
+#define     RIO_MAX_RX_MBOX_256B	63
+
+#define RIO_MSG_MAX_LETTER          4
+
+
+#define RIO_DESC_USED 0		/* Bit index for rio_msg_desc.state */
+
+struct rio_msg_desc {
+/*	unsigned long state;*/
+/*	int desc_no;*/
+	void __iomem *msg_virt;
+	dma_addr_t msg_phys;
+	int last;
+};
+
+struct rio_msg_dme {
+	spinlock_t lock;
+	unsigned long state;
+	struct kref kref;
+	struct rio_priv *priv;
+	struct resource dres;
+	int sz;
+	int entries;
+	int write_idx;
+	int read_idx;
+	int tx_dme_tmo;
+	void *dev_id;
+	int dme_no;
+	int mbox;
+	int letter;
+	u32 dme_ctrl;
+	struct rio_msg_desc *desc;
+	struct rio_desc *descriptors;
+
+#ifdef CONFIG_AXXIA_RIO_STAT
+	unsigned int desc_done_count;
+	unsigned int desc_error_count;
+	unsigned int desc_rio_err_count;
+	unsigned int desc_axi_err_count;
+	unsigned int desc_tmo_err_count;
+#endif
+} ____cacheline_internodealigned_in_smp;
+
+struct rio_rx_mbox {
+	spinlock_t lock;
+	unsigned long state;
+	int mbox_no;
+	struct kref kref;
+	struct rio_mport *mport;
+	void **virt_buffer[RIO_MSG_MAX_LETTER];
+	int last_rx_slot[RIO_MSG_MAX_LETTER];
+	int next_rx_slot[RIO_MSG_MAX_LETTER];
+	int ring_size;
+	struct rio_msg_dme *me[RIO_MSG_MAX_LETTER];
+	unsigned int irq_state_mask;
+	struct hrtimer tmr;
+};
+
+struct rio_tx_mbox {
+	spinlock_t lock;
+	unsigned long state;
+	struct rio_mport *mport;
+	int mbox_no;
+	int dme_no;
+	int ring_size;
+	struct rio_msg_dme *me;
+	void *dev_id;
+	int tx_slot;
+#ifdef CONFIG_AXXIA_RIO_STAT
+	unsigned int sent_msg_count;
+	unsigned int compl_msg_count;
+#endif
+} ____cacheline_internodealigned_in_smp;
+
+struct rio_tx_dme {
+	int	ring_size;
+	int	ring_size_free;
+	struct rio_msg_dme *me;
+	struct hrtimer tmr;
+};
+
+#define PW_MSG_WORDS (RIO_PW_MSG_SIZE/sizeof(u32))
+
+struct rio_pw_irq {
+	/* Port Write */
+	u32 discard_count;
+	u32 msg_count;
+	u32 msg_wc;
+	u32 msg_buffer[PW_MSG_WORDS];
+};
+
+#define RIO_IRQ_ENABLED 0
+#define RIO_IRQ_ACTIVE  1
+
+#define RIO_DME_MAPPED  1
+#define RIO_DME_OPEN    0
+
+#define RIO_MB_OPEN	0
+#define RIO_MB_MAPPED	1
+
+struct rio_irq_handler {
+	unsigned long state;
+/*	struct rio_mport *mport;*/
+	u32 irq_enab_reg_addr;
+	u32 irq_state_reg_addr;
+	u32 irq_state_mask;
+	void (*thrd_irq_fn)(struct rio_irq_handler *h/*, u32 state*/);
+	void (*release_fn)(struct rio_irq_handler *h);
+	void *data;
+};
+
+extern unsigned int axxia_hrtimer_delay;
+/**********************************************/
+/* *********** External Functions *********** */
+/**********************************************/
+
+void axxia_rio_port_irq_init(struct rio_mport *mport);
+void *axxia_get_inb_message(struct rio_mport *mport, int mbox, int letter,
+			      int *sz/*, int *slot, u16 *destid*/);
+int axxia_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf);
+void axxia_close_inb_mbox(struct rio_mport *mport, int mbox);
+int axxia_open_inb_mbox(struct rio_mport *mport, void *dev_id,
+			  int mbox, int entries);
+int axxia_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev,
+			     int mbox_dest, int letter, int flags,
+			     void *buffer, size_t len/*, void *cookie*/);
+void axxia_close_outb_mbox(struct rio_mport *mport, int mbox_id);
+int axxia_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox_id,
+			 int entries/*, int prio*/);
+int axxia_rio_doorbell_send(struct rio_mport *mport,
+			      int index, u16 destid, u16 data);
+int axxia_rio_pw_enable(struct rio_mport *mport, int enable);
+void axxia_rio_port_get_state(struct rio_mport *mport, int cleanup);
+int axxia_rio_port_irq_enable(struct rio_mport *mport);
+void axxia_rio_port_irq_disable(struct rio_mport *mport);
+
+int axxia_ml_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev,
+			     int mbox_dest, void *buffer, size_t len);
+void *axxia_ml_get_inb_message(struct rio_mport *mport, int mbox);
+int alloc_irq_handler(
+	struct rio_irq_handler *h,
+	void *data,
+	const char *name);
+
+void release_mbox_resources(struct rio_priv *priv, int mbox_id);
+void release_irq_handler(struct rio_irq_handler *h);
+void db_irq_handler(struct rio_irq_handler *h, u32 state);
+extern int axxia_rio_init_sysfs(struct platform_device *dev);
+extern void axxia_rio_release_sysfs(struct platform_device *dev);
+
+#if defined(CONFIG_RAPIDIO_HOTPLUG)
+
+int axxia_rio_port_notify_cb(struct rio_mport *mport,
+			       int enable,
+			       void (*cb)(struct rio_mport *mport));
+int axxia_rio_port_op_state(struct rio_mport *mport);
+
+#endif
+
+#endif /* __AXXIA_RIO_IRQ_H__ */
diff --git a/drivers/rapidio/devices/lsi/axxia-rio-sysfs.c b/drivers/rapidio/devices/lsi/axxia-rio-sysfs.c
new file mode 100644
index 0000000..a1c522d
--- /dev/null
+++ b/drivers/rapidio/devices/lsi/axxia-rio-sysfs.c
@@ -0,0 +1,287 @@
+/*
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define DEBUG */
+/* #define IO_OPERATIONS */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include "axxia-rio.h"
+#include "axxia-rio-irq.h"
+
+static ssize_t axxia_rio_stat_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct rio_mport *mport = dev_get_drvdata(dev);
+	struct rio_priv *priv = mport->priv;
+	char *str = buf;
+	u32 reg_val = 0;
+
+	axxia_rio_port_get_state(mport, 0);
+	str += sprintf(str, "Master Port state:\n");
+	axxia_local_config_read(priv, RIO_ESCSR(priv->port_ndx), &reg_val);
+	str += sprintf(str, "ESCSR (0x158) : 0x%08x\n", reg_val);
+	return str - buf;
+}
+static DEVICE_ATTR(stat, S_IRUGO, axxia_rio_stat_show, NULL);
+
+static ssize_t axxia_rio_misc_stat_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct rio_mport *mport = dev_get_drvdata(dev);
+	struct rio_priv *priv = mport->priv;
+	char *str = buf;
+	str += sprintf(str, "RIO PIO Stat:\n");
+	str += sprintf(str, "\t Successful Count: %d\n",
+					priv->rpio_compl_count);
+	str += sprintf(str, "\t Failed Count    : %d\n",
+					priv->rpio_failed_count);
+
+	str += sprintf(str, "AXI PIO Stat:\n");
+	str += sprintf(str, "\t Successful Count: %d\n",
+					priv->apio_compl_count);
+	str += sprintf(str, "\t Failed Count    : %d\n",
+					priv->apio_failed_count);
+
+	str += sprintf(str, "Port Write Stat:\n");
+	str += sprintf(str, "\t Interrupt Count : %d\n", priv->rio_pw_count);
+	str += sprintf(str, "\t Message Count   : %d\n",
+					priv->rio_pw_msg_count);
+
+	return str - buf;
+
+}
+static DEVICE_ATTR(misc_stat, S_IRUGO,
+		   axxia_rio_misc_stat_show, NULL);
+static ssize_t axxia_rio_ib_dme_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct rio_mport *mport = dev_get_drvdata(dev);
+	struct rio_priv *priv = mport->priv;
+	char *str = buf;
+	int e, j;
+	struct rio_rx_mbox *mb;
+	struct rio_msg_dme *me;
+	str += sprintf(str, "Inbound Mailbox (DME) counters:\n");
+	for (e = 0; e < RIO_MAX_RX_MBOX; e++) {
+		mb = priv->ib_mbox[e];
+		if (mb) {
+			for (j = 0; j < RIO_MSG_MAX_LETTER; j++) {
+				me = mb->me[j];
+				str += sprintf(str,
+					"Mbox %d Letter %d DME %d\n",
+					 mb->mbox_no, j, me->dme_no);
+				str += sprintf(str,
+					"\tNumber of Desc Done  : %d\n",
+					me->desc_done_count);
+				str += sprintf(str,
+					"\tNumber of Desc Errors: %d\n",
+					me->desc_error_count);
+				str += sprintf(str,
+					"\t\tRIO Error    : %d\n",
+					me->desc_rio_err_count);
+				str += sprintf(str,
+					"\t\tAXI Error    : %d\n",
+					me->desc_axi_err_count);
+				str += sprintf(str,
+					"\t\tTimeout Error: %d\n",
+					me->desc_tmo_err_count);
+			}
+		}
+	}
+	return str - buf;
+}
+static DEVICE_ATTR(ib_dme_stat, S_IRUGO,
+		   axxia_rio_ib_dme_show, NULL);
+
+static ssize_t axxia_rio_ob_dme_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct rio_mport *mport = dev_get_drvdata(dev);
+	struct rio_priv *priv = mport->priv;
+	char *str = buf;
+	int e;
+	struct rio_msg_dme *me;
+	struct rio_tx_mbox *mb;
+
+	str += sprintf(str, "Outbound Message Engine Counters:\n");
+	for (e = 0; e < DME_MAX_OB_ENGINES; e++) {
+		me = priv->ob_dme_shared[e].me;
+		if (me) {
+			str += sprintf(str, "DME %d Enabled\n", e);
+			str += sprintf(str, "\tNumber of Desc Done  : %d\n",
+					me->desc_done_count);
+			str += sprintf(str, "\tNumber of Desc Errors: %d\n",
+					me->desc_error_count);
+			str += sprintf(str, "\t\tRIO Error    : %d\n",
+					me->desc_rio_err_count);
+			str += sprintf(str, "\t\tAXI Error    : %d\n",
+					me->desc_axi_err_count);
+			str += sprintf(str, "\t\tTimeout Error: %d\n",
+					me->desc_tmo_err_count);
+		} else
+			str += sprintf(str, "DME %d Disabled\n", e);
+	}
+	str += sprintf(str, "*********************************\n");
+	str += sprintf(str, "Outbound Mbox stats\n");
+	for (e = 0; e < RIO_MAX_TX_MBOX; e++) {
+		mb = priv->ob_mbox[e];
+		if (!mb)
+			continue;
+		if ((mb->sent_msg_count) || (mb->compl_msg_count)) {
+			if (test_bit(RIO_DME_OPEN, &mb->state))
+				str += sprintf(str, "Mailbox %d: DME %d\n",
+							e, mb->dme_no);
+			else
+				str += sprintf(str, "Mailbox %d : Closed\n",
+							e);
+			str += sprintf(str, "\tMessages sent     : %d\n",
+						mb->sent_msg_count);
+			str += sprintf(str, "\tMessages Completed: %d\n",
+						mb->compl_msg_count);
+		}
+	}
+
+	return str - buf;
+}
+static DEVICE_ATTR(ob_dme_stat, S_IRUGO,
+		   axxia_rio_ob_dme_show, NULL);
+
+static ssize_t axxia_rio_irq_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct rio_mport *mport = dev_get_drvdata(dev);
+	struct rio_priv *priv = mport->priv;
+	u32 stat;
+	char *str = buf;
+
+	str += sprintf(str, "Interrupt enable bits:\n");
+	axxia_local_config_read(priv, RAB_INTR_ENAB_GNRL, &stat);
+	str += sprintf(str, "General Interrupt Enable (%p)\t%8.8x\n",
+		       (void *)RAB_INTR_ENAB_GNRL, stat);
+	axxia_local_config_read(priv, RAB_INTR_ENAB_ODME, &stat);
+	str += sprintf(str, "Outbound Message Engine  (%p)\t%8.8x\n",
+		       (void *)RAB_INTR_ENAB_ODME, stat);
+	axxia_local_config_read(priv, RAB_INTR_ENAB_IDME, &stat);
+	str += sprintf(str, "Inbound Message Engine   (%p)\t%8.8x\n",
+		       (void *)RAB_INTR_ENAB_IDME, stat);
+	axxia_local_config_read(priv, RAB_INTR_ENAB_MISC, &stat);
+	str += sprintf(str, "Miscellaneous Events     (%p)\t%8.8x\n",
+		       (void *)RAB_INTR_ENAB_MISC, stat);
+	axxia_local_config_read(priv, RAB_INTR_ENAB_APIO, &stat);
+	str += sprintf(str, "Axxia Bus to RIO Events  (%p)\t%8.8x\n",
+		       (void *)RAB_INTR_ENAB_APIO, stat);
+	axxia_local_config_read(priv, RAB_INTR_ENAB_RPIO, &stat);
+	str += sprintf(str, "RIO to Axxia Bus Events  (%p)\t%8.8x\n",
+		       (void *)RAB_INTR_ENAB_RPIO, stat);
+
+	str += sprintf(str, "OBDME : in Timer Mode, Period %9.9d nanoseconds\n",
+			axxia_hrtimer_delay);
+	str += sprintf(str, "IBDME : ");
+	if (priv->dme_mode == AXXIA_IBDME_TIMER_MODE)
+		str += sprintf(str, "in Timer Mode, Period %9.9d nanoseconds\n",
+			axxia_hrtimer_delay);
+	else
+		str += sprintf(str, "in Interrupt Mode\n");
+	return str - buf;
+}
+static DEVICE_ATTR(irq, S_IRUGO, axxia_rio_irq_show, NULL);
+
+static ssize_t axxia_rio_tmo_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct rio_mport *mport = dev_get_drvdata(dev);
+	struct rio_priv *priv = mport->priv;
+	u32 stat;
+	char *str = buf;
+
+	str += sprintf(str, "Port Link Timeout Control Registers:\n");
+	axxia_local_config_read(priv, RIO_PLTOCCSR, &stat);
+	str += sprintf(str, "PLTOCCSR (%p)\t%8.8x\n",
+		       (void *)RIO_PLTOCCSR, stat);
+	axxia_local_config_read(priv, RIO_PRTOCCSR, &stat);
+	str += sprintf(str, "PRTOCCSR (%p)\t%8.8x\n",
+		       (void *)RIO_PRTOCCSR, stat);
+	axxia_local_config_read(priv, RAB_STAT, &stat);
+	str += sprintf(str, "RAB_STAT (%p)\t%8.8x\n",
+		       (void *)RAB_STAT, stat);
+	axxia_local_config_read(priv, RAB_APIO_STAT, &stat);
+	str += sprintf(str, "RAB_APIO_STAT (%p)\t%8.8x\n",
+		       (void *)RAB_APIO_STAT, stat);
+	axxia_local_config_read(priv, RIO_ESCSR(priv->port_ndx), &stat);
+	str += sprintf(str, "PNESCSR (%p)\t%8.8x\n",
+		       (void *)RIO_ESCSR(priv->port_ndx), stat);
+
+	return str - buf;
+}
+static DEVICE_ATTR(tmo, S_IRUGO, axxia_rio_tmo_show, NULL);
+
+static ssize_t axxia_ib_dme_log_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct rio_mport *mport = dev_get_drvdata(dev);
+	struct rio_priv *priv = mport->priv;
+	u32 stat, log;
+	char *str = buf;
+
+	axxia_local_config_read(priv, RAB_INTR_STAT_MISC, &stat);
+	log = (stat & UNEXP_MSG_LOG) >> 24;
+	str += sprintf(str, "mbox[1:0]   %x\n", (log & 0xc0) >> 6);
+	str += sprintf(str, "letter[1:0] %x\n", (log & 0x30) >> 4);
+	str += sprintf(str, "xmbox[3:0]  %x\n", log & 0x0f);
+
+	return str - buf;
+}
+static DEVICE_ATTR(dme_log, S_IRUGO, axxia_ib_dme_log_show, NULL);
+
+static struct attribute *rio_attributes[] = {
+	&dev_attr_stat.attr,
+	&dev_attr_irq.attr,
+	&dev_attr_misc_stat.attr,
+	&dev_attr_ob_dme_stat.attr,
+	&dev_attr_ib_dme_stat.attr,
+	&dev_attr_tmo.attr,
+	&dev_attr_dme_log.attr,
+	NULL
+};
+
+static struct attribute_group rio_attribute_group = {
+	.name = NULL,
+	.attrs = rio_attributes,
+};
+
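+/*
+ * The attributes are created directly on the platform device's kobject
+ * (group name is NULL), so they typically appear as, e.g.,
+ * /sys/devices/platform/<axxia-rio-device>/{stat,irq,misc_stat,tmo,...};
+ * the exact path depends on the platform/DT device name.
+ */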
+int axxia_rio_init_sysfs(struct platform_device *dev)
+{
+	return sysfs_create_group(&dev->dev.kobj, &rio_attribute_group);
+}
+void axxia_rio_release_sysfs(struct platform_device *dev)
+{
+	sysfs_remove_group(&dev->dev.kobj, &rio_attribute_group);
+}
diff --git a/drivers/rapidio/devices/lsi/axxia-rio.c b/drivers/rapidio/devices/lsi/axxia-rio.c
new file mode 100644
index 0000000..79de85b
--- /dev/null
+++ b/drivers/rapidio/devices/lsi/axxia-rio.c
@@ -0,0 +1,1777 @@
+/*
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define IODEBUG */
+/* #define EXTRA1DEBUG */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/of_platform.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/dmapool.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <mach/rio.h>
+#include "axxia-rio.h"
+#include "axxia-rio-irq.h"
+
+#define USE_DME_TIMEOUT
+static DEFINE_SPINLOCK(rio_io_lock);
+
+#ifdef IODEBUG
+#define	IODP(...)	pr_info(__VA_ARGS__)
+#else
+#define	IODP(...)
+#endif
+
+#ifdef EXTRA1DEBUG
+#define	EXT1P(...)	pr_info(__VA_ARGS__)
+#else
+#define	EXT1P(...)
+#endif
+
+#define RIO_IO_READ_HOME        0x00
+#define RIO_MAINT_READ          0x01
+#define RIO_MAINT_WRITE         0x10
+#define RIO_NREAD               0x02
+#define RIO_NWRITE              0x20
+#define RIO_NWRITE_R            0x40
+#define RIO_SWRITE              0x80
+/**
+ * NOTE:
+ *
+ * sRIO Bridge in AXXIA is what it is...
+ *
+ * - Paged access to configuration registers makes local config
+ *   read a non-atomic operation.
+ *
+ * - Big and Little Endian mode registers
+ *   Big Endian:
+ *       0x0000-0xFFFC   - RapidIO Standard Registers
+ *       0x10000-0x1FFFC - Endpoint Controller Specific Registers
+ *   Little Endian
+ *       0x20000-0x3FFFC - Peripheral Bus Bridge Specific Registers
+ *
+ * "SRIO_CONF" registers in AXXIA (e.g. page selection register)
+ * are also Little Endian.  SRIO_CONF is organized as follows:
+ *
+ * - 0x000 .. 0x7ff    Fixed mapping to SRIO/RAB endpoint controller specific
+ *                     registers equivalent to 0x20000 .. 0x207ff.  The
+ *                     RAB_APB_CSR register within this block is used to
+ *                     control the page selection of the 'paged mapping'
+ *                     block.
+ * - 0x800 .. 0xfff    Paged mapping to SRIO generic+endpoint controller
+ *                     specific registers equivalent to 0x00000 .. 0x3ffff
+ *
+ * To avoid an extra spin-lock layer in __axxia_local_config_read
+ * and __axxia_local_config_write, perform all internal driver accesses
+ * to local config registers through the generic rio driver API.
+ *
+ * Accesses through the generic driver:__rio_local_write_config_32(),
+ * __rio_local_read_config_32(), rio_mport_write_config_32() and
+ * rio_mport_read_config_32() all use spin_lock_irqsave() /
+ * spin_unlock_irqrestore(), to ensure local access restrictions.
+ *
+ */
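+
+/*
+ * Worked example of the paging scheme implemented below: for local
+ * offset 0x10204 (an endpoint controller specific register),
+ *
+ *	page_sel = (0x10204 & 0x00fff800) << 5 = 0x10000 << 5 = 0x200000
+ *	window   =  0x10204 & 0x7ff            = 0x204
+ *
+ * i.e. the page written to RAB_APB_CSR is offset >> 11 (placed at bit
+ * 16), and the low 11 bits index into the 2KB paged window.
+ */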
+
+/**
+ * axxia_local_config_read - Generate an AXXIA local config space read
+ * @priv: Master port private data
+ * @offset: Offset into configuration space
+ * @data: Value to be read into
+ *
+ * Generates an AXXIA local configuration space read.
+ * Returns %0 on success or %-EINVAL on failure.
+ */
+int axxia_local_config_read(struct rio_priv *priv,
+			    u32 offset,
+			    u32 *data)
+{
+	u32 page_sel;
+
+
+	if ((offset >= RAB_REG_BASE) &&
+	    (offset < (RAB_REG_BASE+SRIO_CONF_SPACE_SIZE_FIXED))) {
+		/*
+		 * Peripheral Bus Bridge Specific Registers
+		 * (0x2_0000-0x2_0FFC)
+		 */
+		*data = ioread32(priv->regs_win_fixed + (offset & 0x7ff));
+	} else {
+		/* Set correct page to operate on */
+		page_sel = (offset & 0x00fff800) << 5;
+		iowrite32(page_sel, priv->regs_win_fixed + RAB_APB_CSR_BASE);
+
+		if (offset < RAB_REG_BASE) {
+			/*
+			* Registers:
+			*   RapidIO Standard (0x0000-0xFFFC)
+			*   Endpoint Controller Specific (0x1_0000-0x1_FFFC)
+			*/
+			*data = ioread32be(priv->regs_win_paged +
+						(offset & 0x7ff));
+		} else if ((offset >= RAB_REG_BASE) &&
+			   (offset < SRIO_SPACE_SIZE)) {
+			/*
+			* Peripheral Bus Bridge Specific Registers
+			* (0x2_0000-0x3_FFFC)
+			*/
+			*data = ioread32(priv->regs_win_paged +
+						(offset & 0x7ff));
+		} else {
+			dev_err(priv->dev,
+				"RIO: Reading config register not specified "
+				"for AXXIA (0x%8.8x)\n",
+				offset);
+		}
+	}
+
+	IODP("rio[%d]: ACR(%08x, <%08x)\n", priv->mport->id, offset, *data);
+
+	return 0;
+}
+
+/**
+ * axxia_local_config_write - Generate an AXXIA local config space write
+ * @priv: Master port private data
+ * @offset: Offset into configuration space
+ * @data: Value to be written
+ *
+ * Generates an AXXIA local configuration space write.
+ * Returns %0 on success or %-EINVAL on failure.
+ */
+int axxia_local_config_write(struct rio_priv *priv,
+				      u32 offset,
+				      u32 data)
+{
+	u32 page_sel;
+
+	if ((offset >= RAB_REG_BASE) &&
+	    (offset < (RAB_REG_BASE+SRIO_CONF_SPACE_SIZE_FIXED))) {
+		/*
+		 * Peripheral Bus Bridge Specific Registers
+		 * (0x2_0000-0x2_0FFC)
+		 */
+		iowrite32(data, priv->regs_win_fixed + (offset & 0x7ff));
+	} else {
+		/* Set correct page to operate on */
+		page_sel = (offset & 0x00fff800) << 5;
+		iowrite32(page_sel, priv->regs_win_fixed + RAB_APB_CSR_BASE);
+
+		if (offset < RAB_REG_BASE) {
+			/*
+			* Registers:
+			*   RapidIO Standard (0x0000-0xFFFC)
+			*   Endpoint Controller Specific (0x1_0000-0x1_FFFC)
+			*/
+			iowrite32be(data, priv->regs_win_paged +
+						(offset & 0x7ff));
+		} else if ((offset >= RAB_REG_BASE) &&
+			   (offset < SRIO_SPACE_SIZE)) {
+			/*
+			* Peripheral Bus Bridge Specific Registers
+			* (0x2_0000-0x3_FFFC)
+			*/
+			iowrite32(data, priv->regs_win_paged +
+						(offset & 0x7ff));
+		} else {
+			dev_err(priv->dev,
+				"RIO: Trying to write to config register "
+				"not specified for AXXIA (0x%8.8x)\n",
+				offset);
+		}
+	}
+
+	IODP("rio[%d]: ACW(%08x, >%08x)\n", priv->mport->id, offset, data);
+
+	return 0;
+}
+
+/**
+ * axxia_rio_local_config_read - Generate an AXXIA local config space read
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Value to be read into
+ *
+ * Generates an AXXIA local configuration space read.
+ * Returns %0 on success or %-EINVAL on failure.
+ */
+static
+int axxia_rio_local_config_read(struct rio_mport *mport,
+				int index, u32 offset, int len, u32 *data)
+{
+	struct rio_priv *priv = mport->priv;
+	int rc;
+
+	if (len != sizeof(u32))
+		return -EINVAL;
+	rc = axxia_local_config_read(priv, offset, data);
+
+	return rc;
+}
+
+/**
+ * axxia_rio_local_config_write - Generate an AXXIA local config space write
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Value to be written
+ *
+ * Generates an AXXIA local configuration space write.
+ * Returns %0 on success or %-EINVAL on failure.
+ */
+static
+int axxia_rio_local_config_write(struct rio_mport *mport,
+				 int index, u32 offset, int len, u32 data)
+{
+	struct rio_priv *priv = mport->priv;
+	int rc;
+
+	if (len != sizeof(u32))
+		return -EINVAL;
+	rc = axxia_local_config_write(priv, offset, data);
+
+	return rc;
+}
+
+/**
+ * axxia_rio_config_read - Generate an AXXIA read maintenance transaction
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @val: Location to be read into
+ *
+ * Generates an AXXIA read maintenance transaction.
+ * Returns %0 on success or %-EINVAL on failure.
+ */
+
+static
+int axxia_rio_config_read(struct rio_mport *mport, int index,
+			  u16 destid, u8 hopcount, u32 offset,
+			  int len, u32 *val)
+{
+	struct rio_priv *priv = mport->priv;
+	struct atmu_outb *aoutb = NULL;
+	u8 *addr;
+	u32 rval = 0;
+	u32 rbar = 0, ctrl;
+	int rc = 0;
+	u32 error_code = 0;
+
+	aoutb = &priv->outb_atmu[priv->maint_win_id];
+	if (aoutb == NULL)
+		return -EINVAL;
+
+	/* 16MB maintenance windows possible */
+	/* Allow only aligned access to maintenance registers */
+	if (offset > (CONFIG_RIO_MAINT_WIN_SIZE - len) ||
+		!IS_ALIGNED(offset, len))
+		return -EINVAL;
+
+	axxia_local_config_read(priv,
+				       RAB_APIO_AMAP_CTRL(priv->maint_win_id),
+				       &ctrl);
+
+	if (TTYPE_VAL(ctrl)) { /* Not maintenance */
+		dev_err(priv->dev,
+			"(%s): Window is not setup for Maintenance "
+			"operations. 0x%8.8x\n",
+			__func__, ctrl);
+		return -EINVAL;
+	}
+
+	rbar &= ~HOP_COUNT(0xff);     /* Hop Count clear */
+	rbar |= HOP_COUNT(hopcount);  /* Hop Count set */
+	axxia_local_config_write(priv,
+				 RAB_APIO_AMAP_RBAR(priv->maint_win_id),
+				 rbar);
+
+	ctrl &= ~TARGID(0xffff); /* Target id clear */
+	ctrl |= TARGID(destid);  /* Target id set */
+	axxia_local_config_write(priv,
+				 RAB_APIO_AMAP_CTRL(priv->maint_win_id),
+				 ctrl);
+
+	addr = (u8 *) aoutb->win +
+		(offset & (CONFIG_RIO_MAINT_WIN_SIZE - 1));
+
+	switch (len) {
+	case 1:
+		IN_SRIO8(addr, rval, rc);
+		break;
+	case 2:
+		IN_SRIO16(addr, rval, rc);
+		break;
+	case 4:
+		IN_SRIO32(addr, rval, rc);
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	axxia_local_config_read(priv, 0x608, &error_code);
+	if (0 != error_code) {
+		rc = -EINVAL;
+		*val = 0xffffffffu;
+		/* clear error code */
+		axxia_local_config_write(priv,  0x608, 0);
+	}
+
+	if (rc) {
+		dev_dbg(priv->dev,
+			"axxia_rio_config_read: Error when reading\n");
+		dev_dbg(priv->dev,
+			"rio[%d]: RCR(did=%x, hc=%02x, %08x, <%08x)\n",
+			mport->id, destid, hopcount, offset, rval);
+	} else
+		*val = rval;
+
+	IODP("rio[%d]: RCR(did=%x, hc=%02x, %08x, <%08x)\n",
+		mport->id, destid, hopcount, offset, rval);
+
+	return rc;
+}
+
+/**
+ * axxia_rio_config_write - Generate an AXXIA write maintenance transaction
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @val: Value to be written
+ *
+ * Generates an AXXIA write maintenance transaction.
+ * Returns %0 on success or %-EINVAL on failure.
+ */
+static
+int axxia_rio_config_write(struct rio_mport *mport, int index,
+			   u16 destid, u8 hopcount, u32 offset,
+			   int len, u32 val)
+{
+	struct rio_priv *priv = mport->priv;
+	struct atmu_outb *aoutb = NULL;
+	u8 *data;
+	u32 rbar = 0, ctrl, rval;
+	int rc = 0;
+	u32 error_code = 0;
+
+	IODP("rio[%d]: RCW(did=%x, hc=%02x, %08x, >%08x)\n",
+		mport->id, destid, hopcount, offset, val);
+
+	/* Argument validation */
+
+	aoutb = &priv->outb_atmu[priv->maint_win_id];
+	if (aoutb == NULL)
+		return -EINVAL;
+
+	/* 16MB maintenance windows possible */
+	/* Allow only aligned access to maintenance registers */
+	if (offset > (CONFIG_RIO_MAINT_WIN_SIZE - len) ||
+		!IS_ALIGNED(offset, len))
+		return -EINVAL;
+
+	axxia_local_config_read(priv,
+				RAB_APIO_AMAP_CTRL(priv->maint_win_id),
+				&ctrl);
+
+	if (TTYPE_VAL(ctrl)) { /* Not maintenance */
+		dev_err(priv->dev,
+			"(%s): Window is not setup for Maintenance "
+			"operations.\n",
+			__func__);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	rbar &= ~HOP_COUNT(0xff);     /* Hop Count clear */
+	rbar |= HOP_COUNT(hopcount);  /* Hop Count set */
+	axxia_local_config_write(priv,
+				 RAB_APIO_AMAP_RBAR(priv->maint_win_id),
+				 rbar);
+
+	ctrl &= ~TARGID(0xffff); /* Target id clear */
+	ctrl |= TARGID(destid);  /* Target id set */
+	axxia_local_config_write(priv,
+				 RAB_APIO_AMAP_CTRL(priv->maint_win_id),
+				 ctrl);
+	rval = val;
+	data = (u8 *) aoutb->win +
+		(offset & (CONFIG_RIO_MAINT_WIN_SIZE - 1));
+
+	switch (len) {
+	case 1:
+		OUT_SRIO8(data, rval);
+		break;
+	case 2:
+		OUT_SRIO16(data, rval);
+		break;
+	case 4:
+		OUT_SRIO32(data, rval);
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	axxia_local_config_read(priv,  0x608, &error_code);
+	if (0 != error_code) {
+
+		dev_dbg(priv->dev,
+			"axxia_rio_config_write: Error when writing\n");
+
+		dev_dbg(priv->dev,
+			"rio[%d]: RCW(did=%x, hc=%02x, %08x, >%08x)\n",
+			mport->id, destid, hopcount, offset, val);
+
+		rc = -EINVAL;
+		/* clear error code */
+		axxia_local_config_write(priv,  0x608, 0);
+	}
+
+err:
+	return rc;
+}
+
+static inline int __flags2rio_tr_type(u32 mflags, u32 *trans_type)
+{
+	*trans_type = 0;
+	/* Set type of window */
+	if ((mflags == 0) || (mflags & RIO_NWRITE_R))
+		*trans_type = TTYPE(NRD_NWR_R); /* nread and nwrite_r */
+	else if (mflags & RIO_MAINT_WRITE)
+		*trans_type = TTYPE(MRD_MWR); /* mread and mwrite */
+	else if (mflags & RIO_NWRITE)
+		*trans_type = TTYPE(NRD_NWR); /* nread and nwrite */
+	else if (mflags & RIO_SWRITE)
+		*trans_type = TTYPE(NRD_SWR); /* nread and swrite */
+	else
+		return -EINVAL;
+	return 0;
+}
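+
+/*
+ * Example: callers passing mflags == 0 (the common default) get the
+ * NREAD/NWRITE_R pairing; RIO_SWRITE selects streaming writes, which
+ * trade the write response of NWRITE_R for throughput.
+ */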
+
+#if 0
+/**
+ * axxia_rio_map_outb_mem -- Mapping outbound memory.
+ * @mport:  RapidIO master port
+ * @win:    Outbound ATMU window for this access
+ *          - obtained by calling axxia_rio_req_outb_region.
+ * @destid: Destination ID of transaction
+ * @addr:   RapidIO space start address.
+ * @res:    Mapping region phys and virt start address
+ *
+ * Return: 0 -- Success.
+ *
+ */
+static int axxia_rio_map_outb_mem(struct rio_mport *mport, u32 win,
+				u16 destid, u32 addr, u32 mflags,
+				struct rio_map_addr *res)
+{
+	struct rio_priv *priv = mport->priv;
+	struct atmu_outb *aoutb;
+	u32 rbar = 0, ctrl, trans_type;
+	unsigned long flags;
+	int rc;
+
+	rc = __flags2rio_tr_type(mflags, &trans_type);
+	if (rc < 0) {
+		dev_err(priv->dev, "(%s) invalid transaction flags %x\n",
+			__func__, mflags);
+		return rc;
+	}
+
+	spin_lock_irqsave(&rio_io_lock, flags);
+
+	aoutb = &priv->outb_atmu[win];
+	if (unlikely(win >= RIO_OUTB_ATMU_WINDOWS ||
+		     !(aoutb->in_use && aoutb->riores))) {
+		spin_unlock_irqrestore(&rio_io_lock, flags);
+		dev_err(priv->dev, "(%s) faulty ATMU window (%d, %d, %8.8x)\n",
+			__func__, win, aoutb->in_use, (u32) aoutb->riores);
+		return -EINVAL;
+	}
+	__rio_local_read_config_32(mport, RAB_APIO_AMAP_CTRL(win), &ctrl);
+
+	if (TTYPE_VAL(ctrl) != trans_type) {
+		ctrl &= ~TTYPE(0x3);
+		ctrl |= trans_type;
+	}
+	if (TTYPE_VAL(ctrl)) { /* RIO address set - Not maintenance */
+		rbar |= RIO_ADDR_BASE(addr);
+		__rio_local_write_config_32(mport,
+					    RAB_APIO_AMAP_RBAR(win),
+					    rbar);
+	}
+	ctrl &= ~TARGID(0xffff); /* Target id clear */
+	ctrl |= TARGID(destid); /* Target id set */
+	ctrl |= ENABLE_AMBA; /* Enable window */
+	__rio_local_write_config_32(mport, RAB_APIO_AMAP_CTRL(win), ctrl);
+
+	res->phys = aoutb->riores->start + RIO_ADDR_OFFSET(addr);
+	res->va = aoutb->win + RIO_ADDR_OFFSET(addr);
+
+	spin_unlock_irqrestore(&rio_io_lock, flags);
+
+	return 0;
+}
+#endif
+/**
+ * axxia_rio_req_outb_region -- Request outbound region in the
+ *                            RapidIO bus address space.
+ * @mport:  RapidIO master port
+ * @size:   The mapping region size.
+ * @name:   Resource name
+ * @flags:  Flags for mapping. 0 for using default flags.
+ * @id:     Allocated outbound ATMU window id
+ *
+ * Return: 0 -- Success.
+ *
+ * This function will reserve a memory region that may
+ * be used to create mappings from local iomem to rio space.
+ */
+static int axxia_rio_req_outb_region(struct rio_mport *mport,
+				   resource_size_t size,
+				   const char *name,
+				   u32 mflags, u32 *id)
+{
+	u32 win, reg, win_size = 0, trans_type = 0, wabar = 0;
+	struct rio_priv *priv = mport->priv;
+	struct atmu_outb *aoutb;
+	int rc = 0;
+	void __iomem *iowin;
+	struct resource *riores;
+	unsigned long flags;
+
+	if (!(is_power_of_2(size))) {
+		dev_err(priv->dev, "(%s) size is not a power of 2 (%llu)\n",
+			__func__, (unsigned long long)size);
+		return -EFAULT;
+	}
+	rc = __flags2rio_tr_type(mflags, &trans_type);
+	if (rc < 0) {
+		dev_err(priv->dev, "(%s) invalid transaction flags %x\n",
+			__func__, mflags);
+		return rc;
+	}
+
+	spin_lock_irqsave(&rio_io_lock, flags);
+
+	for (win = 0; win < RIO_OUTB_ATMU_WINDOWS; win++) {
+		if (!(priv->outb_atmu[win].in_use))
+			break;
+	}
+
+	if (win == RIO_OUTB_ATMU_WINDOWS) {
+		spin_unlock_irqrestore(&rio_io_lock, flags);
+		dev_err(priv->dev,
+			"(%s) out of ATMU windows to use\n",
+			__func__);
+		return -ENOMEM;
+	}
+	aoutb = &priv->outb_atmu[win];
+	aoutb->in_use = 1;
+	aoutb->win = NULL;
+	aoutb->riores = NULL;
+
+	riores = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+	if (!riores) {
+		aoutb->in_use = 0;
+		spin_unlock_irqrestore(&rio_io_lock, flags);
+		dev_err(priv->dev,
+			"(%s) failed to allocate resources\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	spin_unlock_irqrestore(&rio_io_lock, flags);
+
+	riores->name = name;
+	riores->flags = IORESOURCE_MEM;
+	if (allocate_resource(&mport->iores, riores,
+			      size, mport->iores.start,
+			      mport->iores.end, 0x400, NULL, NULL)) {
+		/* Align on 1 KiB boundary */
+		rc = -ENOMEM;
+		goto out_err_resource;
+	}
+
+	iowin = ioremap(riores->start, size);
+	if (!iowin) {
+		rc = -ENOMEM;
+		goto out_err_ioremap;
+	}
+
+	/* Set base address for window on PIO side */
+	wabar = AXI_BASE_HIGH(riores->start);
+	wabar |= AXI_BASE(riores->start);
+	__rio_local_write_config_32(mport, RAB_APIO_AMAP_ABAR(win), wabar);
+
+	if (WIN_SIZE((u32)size) == 0)
+		size = 0x400u; /* ensure the window size is at least 1 KiB */
+
+	/* Set size of window */
+	win_size |= WIN_SIZE((u32)size);
+	__rio_local_write_config_32(mport, RAB_APIO_AMAP_SIZE(win), win_size);
+	__rio_local_read_config_32(mport, RAB_APIO_AMAP_CTRL(win), &reg);
+	reg &= ~TTYPE(0x3);
+	reg |= trans_type;
+	__rio_local_write_config_32(mport, RAB_APIO_AMAP_CTRL(win), reg);
+
+	spin_lock_irqsave(&rio_io_lock, flags);
+	aoutb->win = iowin;
+	aoutb->riores = riores;
+	spin_unlock_irqrestore(&rio_io_lock, flags);
+
+	*id = win;
+	return 0;
+
+out_err_ioremap:
+	dev_err(priv->dev, "(%s) ioremap IO-mem failed\n",
+		__func__);
+	if (release_resource(riores))
+		dev_err(priv->dev, "(%s) clean-up resource failed\n", __func__);
+out_err_resource:
+	dev_err(priv->dev, "(%s) alloc IO-mem for %s failed\n",
+		__func__, name);
+	kfree(riores);
+
+	spin_lock_irqsave(&rio_io_lock, flags);
+	aoutb->in_use = 0;
+	spin_unlock_irqrestore(&rio_io_lock, flags);
+	return rc;
+}
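
A minimal sketch of how a caller inside this driver might pair the request
call above with the release call that follows; the window size and name are
illustrative and example_outb_window() is hypothetical:

static int example_outb_window(struct rio_mport *mport)
{
	u32 win_id;
	int rc;

	/* Reserve a 64 KiB outbound window for nread/nwrite_r traffic;
	 * the size must be a power of two, as checked above. */
	rc = axxia_rio_req_outb_region(mport, 0x10000, "example_win",
				       RIO_NWRITE_R, &win_id);
	if (rc < 0)
		return rc;

	/* ... set target ID / RIO base address and perform PIO ... */

	axxia_rio_release_outb_region(mport, win_id);
	return 0;
}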
+
+/**
+ * axxia_rio_release_outb_region -- Unreserve outbound memory region.
+ * @mport: RapidIO master port
+ * @win:   Allocated outbound ATMU window id
+ *
+ * Disables and frees the memory resource of an outbound memory region
+ */
+static void axxia_rio_release_outb_region(struct rio_mport *mport,
+					u32 win)
+{
+	struct rio_priv *priv = mport->priv;
+	u32 ctrl;
+	unsigned long flags;
+
+	if (unlikely(win >= RIO_OUTB_ATMU_WINDOWS))
+		return;
+
+	spin_lock_irqsave(&rio_io_lock, flags);
+
+	__rio_local_read_config_32(mport, RAB_APIO_AMAP_CTRL(win), &ctrl);
+	if (likely(priv->outb_atmu[win].in_use)) {
+		struct atmu_outb *aoutb = &priv->outb_atmu[win];
+		struct resource *riores = aoutb->riores;
+		void __iomem *iowin = aoutb->win;
+
+		__rio_local_write_config_32(mport,
+					    RAB_APIO_AMAP_CTRL(win),
+					    ctrl & ~ENABLE_AMBA);
+		aoutb->riores = NULL;
+		aoutb->win = NULL;
+
+		spin_unlock_irqrestore(&rio_io_lock, flags);
+
+		iounmap(iowin);
+		if (release_resource(riores))
+			dev_err(priv->dev, "(%s) clean-up resource failed\n",
+				__func__);
+		kfree(riores);
+
+		spin_lock_irqsave(&rio_io_lock, flags);
+		aoutb->in_use = 0;
+	}
+
+	spin_unlock_irqrestore(&rio_io_lock, flags);
+}
+
+/**
+ * axxia_rio_set_mport_disc_mode - Set master port discovery/enumeration mode
+ *
+ * @mport: Master port
+ *
+ */
+void axxia_rio_set_mport_disc_mode(struct rio_mport *mport)
+{
+	u32 result;
+
+	if (mport->host_deviceid >= 0) {
+		__rio_local_write_config_32(mport, RIO_GCCSR,
+					    RIO_PORT_GEN_HOST |
+					    RIO_PORT_GEN_MASTER |
+					    RIO_PORT_GEN_DISCOVERED);
+	} else {
+		__rio_local_write_config_32(mport, RIO_GCCSR,
+					    RIO_PORT_GEN_MASTER);
+		__rio_local_write_config_32(mport, RIO_DID_CSR,
+					    RIO_SET_DID(mport->sys_size,
+					    RIO_ANY_DESTID(mport->sys_size)));
+	}
+
+#ifdef EXTRA1DEBUG
+	__rio_local_read_config_32(mport, RIO_GCCSR, &result);
+	EXT1P("rio[%d]: RIO_GEN_CTL_CSR set to 0x%X for main port\n",
+		mport->id, result);
+#endif
+
+	__rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR, 0xFFFF);
+
+#ifdef	NOT_SUPPORTED
+	/* Use the reset default setting of (0x00000000).  RAB does not
+	 * support "Accept All=1".  We would need another ID value to use
+	 * if we wanted to set the PTPN and PTE=1. */
+
+	/* Set to receive any dist ID for serial RapidIO controller. */
+	if (mport->phy_type == RIO_PHY_SERIAL)
+		__rio_local_write_config_32(mport,
+					    EPC_PNPTAACR(mport->port_ndx),
+					    0x00000000);
+#endif
+
+#ifdef CONFIG_RAPIDIO_HOTPLUG
+	if (CONFIG_RAPIDIO_SECOND_DEST_ID != DESTID_INVALID) {
+		struct rio_priv *priv = mport->priv;
+
+		result = EPC_PNADIDCSR_ADE;
+		result |= EPC_PNADIDCSR_ADID_SMALL(
+				CONFIG_RAPIDIO_SECOND_DEST_ID);
+		__rio_local_write_config_32(mport,
+					    EPC_PNADIDCSR(priv->port_ndx),
+					    result);
+		dev_dbg(priv->dev, "Port%dAltDevIdmCSR set to 0x%X\n",
+			priv->port_ndx, CONFIG_RAPIDIO_SECOND_DEST_ID);
+	}
+#else
+	/*
+	 * Set the Alternate Destination ID to prevent "Machine Checks"
+	 * and aid the device enumeration/discovery process later on.
+	 */
+	{
+		struct rio_priv *priv = mport->priv;
+
+		result = EPC_PNADIDCSR_ADE;
+		if (mport->sys_size)
+			result |= EPC_PNADIDCSR_ADID_LARGE(~0);
+		else
+			result |= EPC_PNADIDCSR_ADID_SMALL(~0);
+		__rio_local_write_config_32(mport,
+					    EPC_PNADIDCSR(priv->port_ndx),
+					    result);
+		dev_dbg(priv->dev, "Port%dAltDevIdmCSR set to 0x%X\n",
+			priv->port_ndx, result);
+	}
+#endif
+}
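
The alternate-device-ID encoding used above packs small (8-bit) IDs into
register bits 23:16 and large (16-bit) IDs into bits 15:0, per the
EPC_PNADIDCSR_* macros in axxia-rio.h. A quick stand-alone reference check
(macros re-stated here so the sketch compiles on its own):

#include <assert.h>

#define EPC_PNADIDCSR_ADE		(1u << 31)
#define EPC_PNADIDCSR_ADID_SMALL(id)	((unsigned)((id) & 0x00ff) << 16)
#define EPC_PNADIDCSR_ADID_LARGE(id)	((unsigned)((id) & 0xffff) << 0)

int main(void)
{
	/* ~0 truncates to all-ones in the respective ID field */
	assert((EPC_PNADIDCSR_ADE | EPC_PNADIDCSR_ADID_SMALL(~0))
	       == 0x80ff0000u);
	assert((EPC_PNADIDCSR_ADE | EPC_PNADIDCSR_ADID_LARGE(~0))
	       == 0x8000ffffu);
	return 0;
}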
+
+/**
+ * axxia_init_port_data - HW Setup of master port
+ *
+ * @mport: Master port
+ *
+ */
+static void axxia_init_port_data(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	u32 ccsr, data;
+
+#if defined(CONFIG_AXXIA_RIO_16B_ID)
+	__rio_local_read_config_32(mport, RAB_SRDS_CTRL0, &data);
+	__rio_local_write_config_32(mport, RAB_SRDS_CTRL0,
+				    data | RAB_SRDS_CTRL0_16B_ID);
+#endif
+	/* Probe the master port phy type */
+	__rio_local_read_config_32(mport, RIO_CCSR(priv->port_ndx), &ccsr);
+	mport->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL;
+	dev_dbg(priv->dev, "RapidIO PHY type: %s\n",
+		 (mport->phy_type == RIO_PHY_PARALLEL) ? "parallel" :
+		 ((mport->phy_type == RIO_PHY_SERIAL) ? "serial" :
+		  "unknown"));
+
+	__rio_local_read_config_32(mport, RIO_PEF_CAR, &data);
+	mport->sys_size = (data & RIO_PEF_CTLS) >> 4;
+	dev_dbg(priv->dev, "RapidIO Common Transport System size: %d\n",
+		mport->sys_size ? 65536 : 256);
+
+	__rio_local_read_config_32(mport, RIO_DEV_ID_CAR, &priv->devid);
+	__rio_local_read_config_32(mport, RIO_DEV_INFO_CAR, &priv->devrev);
+	{
+		int i;
+		static const u32 legacyids[] = {
+			AXXIA_DEVID_ACP34XX,
+			AXXIA_DEVID_ACP25XX,
+		};
+		__rio_local_read_config_32(mport, RAB_CTRL, &data);
+		priv->intern_msg_desc = (data & 0x00001000) ? 1 : 0;
+		for (i = 0; i < 2; i++) {
+			if (priv->devid == legacyids[i])
+				priv->intern_msg_desc = 1;
+		}
+		EXT1P("rio[%d]: RapidIO internal descriptors: %d (%x %x)\n",
+			mport->id, priv->intern_msg_desc, priv->devid, data);
+	}
+}
+
+/**
+ * axxia_rio_info - Log Port HW setup
+ *
+ * @dev: RIO device
+ * @ccsr: Port N Error and Command Status register
+ *
+ */
+static void axxia_rio_info(struct device *dev, u32 ccsr)
+{
+	const char *str;
+	if (ccsr & 1) {
+		/* Serial phy */
+		switch (ccsr >> 30) {
+		case 0:
+			str = "1";
+			break;
+		case 1:
+			str = "4";
+			break;
+		default:
+			str = "Unknown";
+			break;
+		}
+		dev_dbg(dev, "Hardware port width: %s\n", str);
+
+		switch ((ccsr >> 27) & 7) {
+		case 0:
+			str = "Single-lane 0";
+			break;
+		case 1:
+			str = "Single-lane 2";
+			break;
+		case 2:
+			str = "Four-lane";
+			break;
+		default:
+			str = "Unknown";
+			break;
+		}
+		dev_dbg(dev, "Training connection status: %s\n", str);
+	} else {
+		/* Parallel phy */
+		if (!(ccsr & 0x80000000))
+			dev_dbg(dev, "Output port operating in 8-bit mode\n");
+		if (!(ccsr & 0x08000000))
+			dev_dbg(dev, "Input port operating in 8-bit mode\n");
+	}
+}
+
+/**
+ * rio_start_port - Check the master port
+ * @mport: Master port to be checked
+ *
+ * Check the type of the master port and if it is not ready try to
+ * restart the connection.
+ */
+static int rio_start_port(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	u32 ccsr, escsr;
+
+	/* Probe the master port phy type */
+	__rio_local_read_config_32(mport, RIO_CCSR(priv->port_ndx), &ccsr);
+	__rio_local_read_config_32(mport, RIO_ESCSR(priv->port_ndx), &escsr);
+
+	if (escsr & RIO_ESCSR_PU) {
+
+		dev_err(priv->dev,
+			"Port is not ready. Trying to restart the connection...\n");
+
+		/* Disable ports */
+		ccsr |= RIO_CCSR_PD;
+		__rio_local_write_config_32(mport, RIO_CCSR(priv->port_ndx),
+						ccsr);
+		switch (mport->phy_type) {
+		case RIO_PHY_SERIAL:
+			/* Set 1x lane */
+			ccsr &= ~RIO_CCSR_PWO;
+			ccsr |= RIO_CCSR_FORCE_LANE0;
+			__rio_local_write_config_32(mport,
+						RIO_CCSR(priv->port_ndx), ccsr);
+			break;
+		case RIO_PHY_PARALLEL:
+			break;
+		}
+
+		/* Enable ports */
+		ccsr &= ~RIO_CCSR_PD;
+		__rio_local_write_config_32(mport, RIO_CCSR(priv->port_ndx),
+					ccsr);
+		msleep(100);
+		__rio_local_read_config_32(mport, RIO_ESCSR(priv->port_ndx),
+					&escsr);
+		axxia_rio_info(priv->dev, ccsr);
+		if (escsr & RIO_ESCSR_PU) {
+			dev_dbg(priv->dev, "Port restart failed.\n");
+			return -ENOLINK;
+		} else {
+			dev_dbg(priv->dev, "Port restart success!\n");
+			return 0;
+		}
+	}
+
+#ifdef EXTRA1DEBUG
+	{
+		u32 hdlcsr, rabver;
+
+		__rio_local_read_config_32(mport, RIO_HOST_DID_LOCK_CSR,
+					&hdlcsr);
+		__rio_local_read_config_32(mport, RAB_VER, &rabver);
+
+		pr_info("rio[%d]: AR[%d] DIDCAR[%x]=%08x RAB_VER[%x]=%08x\n",
+			mport->id,
+			__LINE__,
+			RIO_DEV_ID_CAR, priv->devid,
+			RAB_VER, rabver);
+		pr_info("rio[%d]: AR[%d] [%x]=%08x [%x]=%08x [%x]=%08x\n",
+			mport->id,
+			__LINE__,
+			RIO_CCSR(priv->port_ndx), ccsr,
+			RIO_ESCSR(priv->port_ndx), escsr,
+			RIO_HOST_DID_LOCK_CSR, hdlcsr);
+	}
+#endif /* defined(EXTRA1DEBUG) */
+
+	dev_dbg(priv->dev, "Port is Ready\n");
+	return 0;
+}
+
+/**
+ * rio_rab_ctrl_setup - Bridge Control HW setup
+ *
+ * @mport: Master port
+ *
+ * - Response priority = request priority + 1; no AXI byte swap
+ * - Internal (RIO mem) DME descriptor access
+ * - Priority-based message arbitration
+ * - RIO and AMBA PIO enable
+ */
+static void rio_rab_ctrl_setup(struct rio_mport *mport)
+{
+	u32 rab_ctrl;
+
+	__rio_local_write_config_32(mport, AXI_TIMEOUT, 0x00001000);
+
+#ifdef USE_DME_TIMEOUT
+	__rio_local_write_config_32(mport, DME_TIMEOUT, 0xC0080000);
+#else
+	__rio_local_write_config_32(mport, DME_TIMEOUT, 0x00000000);
+#endif
+
+	rab_ctrl = 0;
+	rab_ctrl |= (1 << 12);	/* internal (RIO mem) DME descriptor access */
+	rab_ctrl |= (2 << 6);	/* priority-based message arbitration */
+	rab_ctrl |= 3;		/* RIO and AMBA PIO enable */
+	__rio_local_write_config_32(mport, RAB_CTRL, rab_ctrl);
+}
+
+/**
+ * rio_rab_pio_enable - Setup Peripheral Bus bridge,
+ *                      RapidIO <-> Peripheral bus, HW.
+ *
+ * @mport: Master port
+ *
+ * Enable AXI PIO + outbound nwrite/nread/maintenance
+ * Enable RIO PIO (enable rx maint port-write packet)
+ */
+static void rio_rab_pio_enable(struct rio_mport *mport)
+{
+	__rio_local_write_config_32(mport, RAB_APIO_CTRL,
+				    RAB_APIO_MAINT_MAP_EN |
+				    RAB_APIO_MEM_MAP_EN |
+				    RAB_APIO_PIO_EN);
+	__rio_local_write_config_32(mport, RAB_RPIO_CTRL, RAB_RPIO_PIO_EN);
+}
+
+/**
+ * axxia_rio_static_win_init -- Setup static ATMU window for maintenance
+ *                        access and enable doorbells
+ *
+ * @mport: Master port
+ *
+ * Returns:
+ * 0        - At success
+ * -EFAULT  - Requested outbound region cannot be claimed
+ */
+int axxia_rio_static_win_init(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	u32 ctrl;
+
+	/* Enable inbound doorbell */
+	__rio_local_write_config_32(mport, RAB_IB_DB_CSR, IB_DB_CSR_EN);
+
+	/* Configure maintenance transaction window */
+	if ((axxia_rio_req_outb_region(mport, CONFIG_RIO_MAINT_WIN_SIZE,
+				     "rio_maint_win", RIO_MAINT_WRITE,
+				     &priv->maint_win_id)) < 0)
+		goto err;
+
+	__rio_local_read_config_32(mport,
+				   RAB_APIO_AMAP_CTRL(priv->maint_win_id),
+				   &ctrl);
+	/* Enable window */
+	ctrl |= ENABLE_AMBA;
+	__rio_local_write_config_32(mport,
+				    RAB_APIO_AMAP_CTRL(priv->maint_win_id),
+				    ctrl);
+
+	return 0;
+err:
+	return -EFAULT;
+}
+
+/**
+ * axxia_rio_static_win_release -- Release static ATMU maintenance window
+ *                                 Disable doorbells
+ *
+ * @mport: Master port
+ *
+ */
+void axxia_rio_static_win_release(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	u32 ibdb;
+
+	/* Disable inbound doorbell */
+	__rio_local_read_config_32(mport, RAB_IB_DB_CSR, &ibdb);
+	ibdb &= ~IB_DB_CSR_EN;
+	__rio_local_write_config_32(mport, RAB_IB_DB_CSR, ibdb);
+
+	/* Release maintenance transaction window */
+	axxia_rio_release_outb_region(mport, priv->maint_win_id);
+}
+
+/**
+ * rio_parse_dtb - Parse RapidIO platform entry
+ *
+ * @dev: RIO platform device
+ * @ndx: Which instance are we?
+ * @law_start: Local Access Window start address from DTB
+ * @law_size: Local Access Window size from DTB
+ * @regs: RapidIO registers from DTB
+ * @ob_num_dmes: Number of outbound DMEs available
+ * @outb_dmes: RapidIO outbound DMEs array available;
+ *                [0] for MSeg, [1] for SSeg
+ * @ib_num_dmes: Number of inbound DMEs available
+ * @inb_dmes: RapidIO inbound DMEs array available; 2 elements
+ * @irq: RapidIO IRQ mapping from DTB
+ * @linkdown_reset: Link-down reset event register description from DTB
+ *
+ * Returns:
+ * -EFAULT          At failure
+ * 0                Success
+ */
+static int rio_parse_dtb(
+	struct platform_device *dev,
+	int *ndx,
+	u64 *law_start,
+	u64 *law_size,
+	struct resource *regs,
+	int *ob_num_dmes,
+	int *outb_dmes,
+	int *ib_num_dmes,
+	int *inb_dmes,
+	int *irq,
+	struct event_regs *linkdown_reset)
+{
+	const u32 *dt_range, *cell;
+	int rlen, rc;
+	int paw, aw, sw;
+
+	if (!dev->dev.of_node) {
+		dev_err(&dev->dev, "Device OF-Node is NULL");
+		return -EFAULT;
+	}
+
+	if (!of_device_is_available(dev->dev.of_node)) {
+		EXT1P("rio[%d]: AR[%d] status = not available\n", 99, __LINE__);
+		return -ENODEV;
+	}
+	EXT1P("rio[%d]: AR[%d] status = available\n", 99, __LINE__);
+
+	if (of_property_read_u32(dev->dev.of_node, "index", &rlen))
+		return -ENODEV;
+	*ndx = rlen;
+
+	rc = of_address_to_resource(dev->dev.of_node, 0, regs);
+	if (rc) {
+		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
+			dev->dev.of_node->full_name);
+		return -EFAULT;
+	}
+	dev_dbg(&dev->dev,
+		"Of-device full name %s\n",
+		 dev->dev.of_node->full_name);
+	dev_dbg(&dev->dev, "Regs: %pR\n", regs);
+
+	dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen);
+
+	if (!dt_range) {
+		dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
+			dev->dev.of_node->full_name);
+		return -EFAULT;
+	}
+
+	/* Get node address width */
+	cell = of_get_property(dev->dev.of_node, "#address-cells", NULL);
+	if (cell)
+		aw = *cell;
+	else
+		aw = of_n_addr_cells(dev->dev.of_node);
+	if (aw > 3)			/* Anomaly in A15 build+parse */
+		aw = 2;
+	/* Get node size width */
+	cell = of_get_property(dev->dev.of_node, "#size-cells", NULL);
+	if (cell)
+		sw = *cell;
+	else
+		sw = of_n_size_cells(dev->dev.of_node);
+	if (sw > 3)			/* Anomaly in A15 build+parse */
+		sw = 2;
+	/* Get parent address width */
+	paw = of_n_addr_cells(dev->dev.of_node);
+
+	*law_start = of_read_number(dt_range + aw, paw);
+	*law_size = of_read_number(dt_range + aw + paw, sw);
+
+	dev_dbg(&dev->dev, "LAW: [mem 0x%016llx -- 0x%016llx]\n",
+		*law_start, *law_start + *law_size - 1);
+
+	outb_dmes[0] = outb_dmes[1] = 0;
+	cell = of_get_property(dev->dev.of_node, "outb-dmes", &rlen);
+	if (!cell) {
+		ob_num_dmes[0] = 2;
+		ob_num_dmes[1] = 1;
+		outb_dmes[0] = 0x00000003;
+		outb_dmes[1] = 0x00000001;
+	} else {
+		if (rlen < (4 * sizeof(int))) {
+			dev_err(&dev->dev, "Invalid %s property 'outb-dmes'\n",
+				dev->dev.of_node->full_name);
+			return -EFAULT;
+		}
+		ob_num_dmes[0] = of_read_number(cell, 1);
+		outb_dmes[0] = of_read_number(cell + 1, 1);
+		ob_num_dmes[1] = of_read_number(cell + 2, 1);
+		outb_dmes[1] = of_read_number(cell + 3, 1);
+		if (((ob_num_dmes[0])+(ob_num_dmes[1])) > DME_MAX_OB_ENGINES) {
+			dev_err(&dev->dev, "Invalid %s property 'outb-dmes'\n",
+				dev->dev.of_node->full_name);
+			return -EFAULT;
+		}
+	}
+	dev_dbg(&dev->dev, "outb-dmes: MSeg[%d]=%08x SSeg[%d]=%08x\n",
+		ob_num_dmes[0], outb_dmes[0], ob_num_dmes[1], outb_dmes[1]);
+
+	inb_dmes[0] = inb_dmes[1] = 0;
+	cell = of_get_property(dev->dev.of_node, "inb-dmes", &rlen);
+	if (!cell) {
+		ib_num_dmes[0] = DME_MAX_IB_ENGINES;
+		ib_num_dmes[1] = 0;
+		inb_dmes[0] = 0xffffffff;
+		inb_dmes[1] = 0x00000000;
+	} else {
+		if (rlen < (4 * sizeof(int))) {
+			dev_err(&dev->dev, "Invalid %s property 'inb-dmes'\n",
+				dev->dev.of_node->full_name);
+			return -EFAULT;
+		}
+		ib_num_dmes[0] = of_read_number(cell, 1);
+		inb_dmes[0] = of_read_number(cell + 1, 1);
+		ib_num_dmes[1] = of_read_number(cell + 2, 1);
+		inb_dmes[1] = of_read_number(cell + 3, 1);
+		if (((ib_num_dmes[0])+(ib_num_dmes[1])) > DME_MAX_IB_ENGINES) {
+			dev_err(&dev->dev, "Invalid %s property 'inb-dmes'\n",
+				dev->dev.of_node->full_name);
+			return -EFAULT;
+		}
+	}
+	dev_dbg(&dev->dev, "inb-dmes: MSeg[%d]=%08x SSeg[%d]=%08x\n",
+		ib_num_dmes[0], inb_dmes[0], ib_num_dmes[1], inb_dmes[1]);
+
+	*irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+	dev_dbg(&dev->dev, "irq: %d\n", *irq);
+
+	memset(linkdown_reset, 0, sizeof(struct event_regs));
+	dt_range = of_get_property(dev->dev.of_node, "linkdown-reset", &rlen);
+	if (dt_range) {
+		if (rlen < (6 * sizeof(int))) {
+			dev_err(&dev->dev,
+				"Invalid %s property 'linkdown-reset'\n",
+				dev->dev.of_node->full_name);
+			return -EFAULT;
+		} else {
+			linkdown_reset->phy_reset_start =
+				of_read_number(dt_range + aw, paw);
+			linkdown_reset->phy_reset_size =
+				of_read_number(dt_range + aw + paw, sw);
+			linkdown_reset->reg_addr =
+				of_read_number(dt_range + 0, 1);
+			linkdown_reset->reg_mask =
+				of_read_number(dt_range + 1, 1);
+			linkdown_reset->in_use = 1;
+			EXT1P("rio: LDR st=%llx sz=%llx RA=%x MSK=%x iu=%d\n",
+				linkdown_reset->phy_reset_start,
+				linkdown_reset->phy_reset_size,
+				linkdown_reset->reg_addr,
+				linkdown_reset->reg_mask,
+				linkdown_reset->in_use);
+		}
+	}
+
+	return 0;
+}
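
rio_parse_dtb() composes the 64-bit LAW start and size from 32-bit device
tree cells via of_read_number(). A user-space sketch of that composition
(big-endian cell conversion omitted; the cell counts and values below are
hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Simplified of_read_number(): concatenate 'size' 32-bit cells */
static uint64_t read_number(const uint32_t *cell, int size)
{
	uint64_t r = 0;

	while (size--)
		r = (r << 32) | *cell++;
	return r;
}

int main(void)
{
	/* ranges = <child-addr(aw=2) parent-addr(paw=2) size(sw=2)> */
	uint32_t ranges[] = { 0x0, 0x0, 0x31, 0x0, 0x0, 0x40000000 };
	int aw = 2, paw = 2, sw = 2;

	printf("law_start = 0x%llx\n",
	       (unsigned long long)read_number(ranges + aw, paw));
	printf("law_size  = 0x%llx\n",
	       (unsigned long long)read_number(ranges + aw + paw, sw));
	return 0;
}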
+
+/**
+ * rio_ops_setup - Alloc and initiate the RIO ops struct
+ *
+ * Returns:
+ * ERR_PTR(-ENOMEM)      At failure
+ * struct rio_ops *ptr   to initialized ops data at Success
+ */
+static struct rio_ops *rio_ops_setup(void)
+{
+	struct rio_ops *ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+
+	if (!ops)
+		return ERR_PTR(-ENOMEM);
+
+	ops->lcread = axxia_rio_local_config_read;
+	ops->lcwrite = axxia_rio_local_config_write;
+	ops->cread = axxia_rio_config_read;
+	ops->cwrite = axxia_rio_config_write;
+	ops->dsend = axxia_rio_doorbell_send;
+	ops->pwenable = axxia_rio_pw_enable;
+	ops->open_outb_mbox = axxia_open_outb_mbox;
+	ops->open_inb_mbox = axxia_open_inb_mbox;
+	ops->close_outb_mbox = axxia_close_outb_mbox;
+	ops->close_inb_mbox = axxia_close_inb_mbox;
+	ops->add_outb_message = axxia_ml_add_outb_message;
+	ops->add_inb_buffer = axxia_add_inb_buffer;
+	ops->get_inb_message = axxia_ml_get_inb_message;
+#ifdef CONFIG_RAPIDIO_HOTPLUG
+	ops->hotswap = axxia_rio_hotswap;
+	ops->port_notify_cb = axxia_rio_port_notify_cb;
+	ops->port_op_state = axxia_rio_port_op_state;
+#endif
+	return ops;
+}
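
These hooks are invoked indirectly by the generic RapidIO core. A sketch of
the dispatch path for a local config read (simplified; the real wrapper is
generated by the RIO_LOP_READ() macro in drivers/rapidio/rio-access.c, and
this helper is hypothetical):

static int example_local_read_32(struct rio_mport *mport, u32 offset, u32 *val)
{
	/* The core passes the mport id as the index argument */
	return mport->ops->lcread(mport, mport->id, offset, sizeof(u32), val);
}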
+
+/**
+ * rio_mport_dtb_setup - Alloc and initialize the master port data
+ *                       structure with data retrieved from DTB
+ *
+ * @dev: RIO platform device
+ * @law_start: Local Access Window start address from DTB
+ * @law_size: Local Access Window size from DTB
+ * @ops: RIO ops data structure
+ * @port_ndx: Port index of the controller
+ * @ptr: Output argument; set to the initialized mport on success
+ *
+ * Init mport data structure
+ * Request RIO iomem resources
+ * Register doorbell and mbox resources with generic RIO driver
+ *
+ * Returns:
+ * -ENOMEM   At failure
+ * 0         At success; *ptr points to the initialized mport data
+ */
+static int rio_mport_dtb_setup(struct platform_device *dev,
+			       int port_ndx,
+			       u64 law_start,
+			       u64 law_size,
+			       struct rio_ops *ops,
+			       struct rio_mport **ptr)
+{
+	int rc = 0;
+	struct rio_mport *mport = kzalloc(sizeof(*mport), GFP_KERNEL);
+
+	(*ptr) = NULL;
+
+	if (!mport)
+		return -ENOMEM;
+
+	mport->index = port_ndx;
+
+	INIT_LIST_HEAD(&mport->dbells);
+	mport->iores.start = law_start;
+	mport->iores.end = law_start + law_size - 1;
+	mport->iores.flags = IORESOURCE_MEM;
+	mport->iores.name = "rio_io_win";
+	mport->iores.parent = NULL;
+	mport->iores.child = NULL;
+	mport->iores.sibling = NULL;
+
+	if (request_resource(&iomem_resource, &mport->iores) < 0) {
+		dev_err(&dev->dev,
+			"RIO: Error requesting master port region "
+			"0x%016llx-0x%016llx\n",
+			(u64)mport->iores.start, (u64)mport->iores.end);
+		kfree(mport);
+		return -ENOMEM;
+	}
+	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
+	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0,
+			RIO_MAX_RX_MBOX);
+	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0,
+			RIO_MAX_TX_MBOX);
+	sprintf(mport->name, "RIO%d mport", mport->id);
+
+	mport->ops = ops;
+	mport->phys_efptr = 0x100; /* phys layer EF pointer; could be a #define */
+
+	(*ptr) = mport;
+	return rc;
+}
+
+/**
+ * rio_priv_dtb_setup - Alloc and initialize the master port private data
+ *                      structure with data retrieved from DTB
+ *
+ * @dev: RIO platform device
+ * @regs: RapidIO registers from DTB
+ * @mport: master port
+ * @ndx: Instance Id of the controller description
+ * @port_ndx: Port Id of the controller
+ * @num_outb_dmes: Override of the number of outbound DMEs available
+ * @outb_dmes: RapidIO outbound DMEs array available; [0] for MSeg, [1] for SSeg
+ * @num_inb_dmes: Override of the number of inbound DMEs available
+ * @inb_dmes: RapidIO inbound DMEs array available; 2 elements
+ * @irq: IRQ number
+ *
+ * Init master port private data structure
+ *
+ * Returns:
+ * ERR_PTR(-ENOMEM)        At failure
+ * struct rio_priv *ptr    to initialized priv data at Success
+ */
+
+static struct rio_priv *rio_priv_dtb_setup(
+	struct platform_device *dev,
+	struct resource *regs,
+	struct rio_mport *mport,
+	int ndx,
+	int port_ndx,
+	int *num_outb_dmes,
+	int *outb_dmes,
+	int *num_inb_dmes,
+	int *inb_dmes,
+	int irq,
+	struct event_regs *linkdown_reset)
+{
+	struct rio_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	int i, rc;
+
+	if (!priv)
+		return ERR_PTR(-ENOMEM);
+
+	/* master port driver handle (bidirectional reference supported) */
+	mport->priv = priv;
+	priv->cookie = LSI_AXXIA_RIO_COOKIE;
+	priv->mport = mport;
+	priv->ndx = ndx;
+	priv->port_ndx = port_ndx;
+	mutex_init(&priv->api_lock);
+	/* Max descriptors */
+	priv->desc_max_entries = RIO_MSG_MAX_ENTRIES;
+
+	if (priv->intern_msg_desc) {
+		/* Support for alloc_message_engine() */
+		struct resource *dres = &priv->acpres[ACP_HW_DESC_RESOURCE];
+
+		memset(dres, 0, sizeof(struct resource));
+			/* 'virtual' mapping of descriptors */
+		dres->start = 0;
+		dres->end = priv->desc_max_entries - 1;
+		dres->flags = ACP_RESOURCE_HW_DESC;
+		dres->name = "rio_desc_win";
+		dres->parent = NULL;
+		dres->child = NULL;
+		dres->sibling = NULL;
+
+		if (request_resource(&iomem_resource, dres) < 0) {
+			dev_err(&dev->dev,
+				"RIO: Error requesting descriptor region "
+				"0x%016llx-0x%016llx\n",
+				(u64)dres->start, (u64)dres->end);
+			rc = -ENOMEM;
+			goto err_fixed;
+		}
+	}
+
+	/* Defined DMEs */
+	if (outb_dmes) {
+		priv->num_outb_dmes[0] = num_outb_dmes[0];
+		priv->num_outb_dmes[1] = num_outb_dmes[1];
+		priv->outb_dmes[0] = outb_dmes[0];
+		priv->outb_dmes[1] = outb_dmes[1];
+	}
+	if (inb_dmes) {
+		priv->num_inb_dmes[0] = num_inb_dmes[0];
+		priv->num_inb_dmes[1] = num_inb_dmes[1];
+		priv->inb_dmes[0] = inb_dmes[0];
+		priv->inb_dmes[1] = inb_dmes[1];
+	}
+
+	/* Interrupt handling */
+	priv->irq_line = irq;
+	axxia_rio_port_irq_init(mport);
+
+	/* Dev ptr for debug printouts */
+	priv->dev = &dev->dev;
+
+	/* Init ATMU data structures */
+	for (i = 0; i < RIO_OUTB_ATMU_WINDOWS; i++) {
+		priv->outb_atmu[i].in_use = 0;
+		priv->outb_atmu[i].riores = NULL;
+	}
+
+	/* Setup local access */
+	priv->regs_win_fixed = ioremap(regs->start, SRIO_CONF_SPACE_SIZE_FIXED);
+	if (!priv->regs_win_fixed) {
+		rc = -ENOMEM;
+		goto err_fixed;
+	}
+	priv->regs_win_paged = ioremap(regs->start + SRIO_CONF_SPACE_SIZE_FIXED,
+					SRIO_CONF_SPACE_SIZE_PAGED);
+	if (!priv->regs_win_paged) {
+		rc = -ENOMEM;
+		goto err_paged;
+	}
+	if (linkdown_reset && linkdown_reset->in_use) {
+		memcpy(&priv->linkdown_reset, linkdown_reset,
+			sizeof(struct event_regs));
+		priv->linkdown_reset.win =
+			ioremap(linkdown_reset->phy_reset_start,
+				linkdown_reset->phy_reset_size);
+		if (!priv->linkdown_reset.win) {
+			rc = -ENOMEM;
+			goto err_linkdown;
+		}
+		EXT1P("rio[%d]: LDR win=%p\n", mport->id,
+			priv->linkdown_reset.win);
+	}
+
+	return priv;
+
+err_linkdown:
+	if (priv->linkdown_reset.win)
+		iounmap(priv->linkdown_reset.win);
+	iounmap(priv->regs_win_paged);
+err_paged:
+	iounmap(priv->regs_win_fixed);
+err_fixed:
+	kfree(priv);
+	return ERR_PTR(rc);
+}
+
+/**
+ * axxia_rio_start_port - Start master port
+ *
+ * @mport: Master port
+ *
+ * Check the type of the master port and if it is not ready try to
+ * restart the connection.
+ * In hotplug mode we don't really care about connection state
+ * elsewise we give up if the port is not up.
+ *
+ * Setup HW for basic memap access support:
+ * enable AXI bridge, maintenance window, doorbells, etc..
+ */
+int axxia_rio_start_port(struct rio_mport *mport)
+{
+	int rc;
+	struct rio_priv *priv = mport->priv;
+
+	/*
+	 * Set port line request ack timeout 1.5 - 3 s
+	 * Set port response timeout 1.5 - 3 s
+	 */
+	__rio_local_write_config_32(mport, RIO_PLTOCCSR, 0x7fffff);
+	__rio_local_write_config_32(mport, RIO_PRTOCCSR, 0x7fffff);
+
+	/* Check port training state */
+
+	rc = rio_start_port(mport);
+	if (rc < 0) {
+#ifdef CONFIG_RAPIDIO_HOTPLUG
+		dev_warn(priv->dev, "Link is down - will continue anyway\n");
+#else
+		dev_err(priv->dev, "Link is down - SRIO Init failed\n");
+		return rc;
+#endif
+	}
+
+	/* Enable memory mapped access
+	 */
+	rio_rab_ctrl_setup(mport);
+
+	rio_rab_pio_enable(mport);
+
+	/* Miscellaneous
+	 */
+	__rio_local_write_config_32(mport, RAB_OB_DME_TID_MASK,
+				    OB_DME_TID_MASK);
+
+	/* Setup maintenance window
+	 * Enable doorbells
+	 */
+	rc = axxia_rio_static_win_init(mport);
+
+	return rc;
+}
+
+/**
+ * axxia_rio_setup - Setup AXXIA RapidIO interface
+ * @dev: platform_device pointer
+ *
+ * Initializes AXXIA RapidIO hardware interface, configures
+ * master port with system-specific info, and registers the
+ * master port with the RapidIO subsystem.
+ *
+ * Init sequence is divided into two phases
+ * 1:
+ *    All one-time initialization: e.g. driver software structures,
+ *    work queues, tasklets, sync resources etc. are allocated and
+ *    initialized. At this stage no HW access is possible. To avoid
+ *    race conditions, all HW accesses to local configuration space must
+ *    be handled through the generic RIO driver access functions, and
+ *    these may not be used before the master port data structure is
+ *    initialized.
+ * 2:
+ *    Setup and try to start RapidIO master port controller HW
+ *    If the driver is built with hotplug support, the setup routine
+ *    does not require the link to be up in order to complete successfully;
+ *    the port may be restarted at any point later in time. Without
+ *    hotplug, the setup function will fail if the link training sequence
+ *    doesn't complete successfully.
+ *
+ * Returns:
+ * <0           Failure
+ * 0            Success
+ */
+static int axxia_rio_setup(struct platform_device *dev)
+{
+	int rc = -EFAULT;
+	struct rio_ops *ops;
+	struct rio_mport *mport;
+	struct rio_priv *priv;
+	struct resource regs;
+	u64 law_start = 0, law_size = 0;
+	int ndx = 0, irq = 0, port_ndx = 0;
+	int numObDmes[2] = { 0, }, outb_dmes[2] = { 0, };
+	int numIbDmes[2] = { 0, }, inb_dmes[2] = { 0, };
+	struct event_regs linkdown_reset = { 0, };
+#ifdef CONFIG_AXXIA_RIO_DS
+	struct axxia_rio_ds_dtb_info ds_dtb_info; /* data_streaming */
+#endif
+
+	/* Get address boundaries, etc. from DTB */
+	if (rio_parse_dtb(dev, &ndx, &law_start, &law_size, &regs,
+			  &numObDmes[0], &outb_dmes[0],
+			  &numIbDmes[0], &inb_dmes[0],
+			  &irq, &linkdown_reset))
+		return -EFAULT;
+
+	rc = axxia_rapidio_board_init(dev, ndx, &port_ndx);
+	if (rc != 0)
+		return rc;
+#ifdef CONFIG_AXXIA_RIO_DS
+	rc = axxia_parse_dtb_ds(dev, &ds_dtb_info);
+	if (rc != 0)
+		return rc;
+#endif
+	/* Alloc and Initialize driver SW data structure */
+	ops = rio_ops_setup();
+	if (IS_ERR(ops)) {
+		rc = PTR_ERR(ops);
+		goto err_ops;
+	}
+	rc = rio_mport_dtb_setup(dev, port_ndx, law_start, law_size,
+				 ops, &mport);
+	if (rc != 0)
+		goto err_port;
+	priv = rio_priv_dtb_setup(dev, &regs, mport, ndx, port_ndx,
+				  &numObDmes[0], &outb_dmes[0],
+				  &numIbDmes[0], &inb_dmes[0],
+				  irq, &linkdown_reset);
+	if (IS_ERR(priv)) {
+		rc = PTR_ERR(priv);
+		goto err_priv;
+	}
+
+	/* !!! HW access to local config space starts here !!! */
+
+	/* Get and set master port data
+	 */
+	axxia_init_port_data(mport);
+
+	/* Start port and enable basic memmap access
+	 */
+	rc = axxia_rio_start_port(mport);
+	if (rc < 0)
+		goto err_maint;
+
+	/* Hookup IRQ handlers
+	 */
+	if (axxia_rio_port_irq_enable(mport))
+		goto err_irq;
+
+	/* Hookup SYSFS support
+	 */
+	dev_set_drvdata(&dev->dev, mport);
+#ifdef CONFIG_AXXIA_RIO_STAT
+	axxia_rio_init_sysfs(dev);
+#endif
+#ifdef CONFIG_AXXIA_RIO_DS
+	/* Data_streaming */
+	if (ds_dtb_info.ds_enabled == 1) {
+		rc = axxia_cfg_ds(mport, &ds_dtb_info);
+		if (rc)
+			goto err_mport;
+		axxia_rio_ds_port_irq_init(mport);
+	}
+#endif
+	/* Register port with core driver
+	 */
+	if (rio_register_mport(mport)) {
+		dev_err(&dev->dev, "register mport failed\n");
+		goto err_mport;
+	}
+
+	/* Correct the host device id if needed
+	 */
+	{
+		u16 id = rio_local_get_device_id(mport);
+		EXT1P("rio[%d]: AR[%d] devid=%d hdid=%d\n",
+			mport->id, __LINE__,
+			mport->host_deviceid, rio_local_get_device_id(mport));
+		if (mport->host_deviceid < 0) {
+			if ((id != 0xFF) && (mport->sys_size == 0))
+				mport->host_deviceid = id;
+			else if ((id != 0xFFFF) && (mport->sys_size != 0))
+				mport->host_deviceid = id;
+		}
+	}
+
+	/* Any changes needed based on device id / revision ?
+	*/
+	switch (priv->devid) {
+	case AXXIA_DEVID_ACP25XX:
+		priv->outb_dmes[1] = 0x00000000;
+		break;
+	case AXXIA_DEVID_AXM55XX:
+		switch (priv->devrev) {
+		case AXXIA_DEVREV_AXM55XX_V1_0:
+			priv->outb_dmes[1] = 0x00000000;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* And set the discovery mode for this port before we go
+	 */
+	axxia_rio_set_mport_disc_mode(mport);
+
+	EXT1P("rio[%p:%d]: priv=%p\n", mport, mport->id,
+		priv);
+	return 0;
+
+err_mport:
+	axxia_rio_port_irq_disable(mport);
+#ifdef CONFIG_AXXIA_RIO_STAT
+	axxia_rio_release_sysfs(dev);
+#endif
+err_irq:
+	axxia_rio_static_win_release(mport);
+err_maint:
+	if (priv->linkdown_reset.win)
+		iounmap(priv->linkdown_reset.win);
+	iounmap(priv->regs_win_fixed);
+	iounmap(priv->regs_win_paged);
+	kfree(priv);
+err_priv:
+	kfree(mport);
+err_port:
+	kfree(ops);
+err_ops:
+	irq_dispose_mapping(irq);
+	return rc;
+}
+
+/*
+ * The probe function for the RapidIO peer-to-peer network.
+ */
+static int axxia_of_rio_rpn_probe(struct platform_device *dev)
+{
+	EXT1P(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n",
+	       dev->dev.of_node->full_name);
+
+	return axxia_rio_setup(dev);
+}
+
+static const struct of_device_id axxia_of_rio_rpn_ids[] = {
+	{ .compatible = "axxia,rapidio-delta", },
+	{ .compatible = "acp,rapidio-delta", },
+	{},
+};
+
+static struct platform_driver axxia_of_rio_rpn_driver = {
+	.driver = {
+		.name = "axxia-of-rio",
+		.owner = THIS_MODULE,
+		.of_match_table = axxia_of_rio_rpn_ids,
+	},
+	.probe = axxia_of_rio_rpn_probe,
+};
+
+static __init int axxia_of_rio_rpn_init(void)
+{
+	EXT1P(KERN_INFO "Register RapidIO platform driver\n");
+	return platform_driver_register(&axxia_of_rio_rpn_driver);
+}
+
+subsys_initcall_sync(axxia_of_rio_rpn_init);
diff --git a/drivers/rapidio/devices/lsi/axxia-rio.h b/drivers/rapidio/devices/lsi/axxia-rio.h
new file mode 100644
index 0000000..479be4b
--- /dev/null
+++ b/drivers/rapidio/devices/lsi/axxia-rio.h
@@ -0,0 +1,599 @@
+/*
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AXXIA_RIO_H_
+#define _AXXIA_RIO_H_
+
+#include <linux/device.h>
+#include <linux/of_platform.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+
+#include "axxia-rio-irq.h"
+/* Constants, Macros, etc. */
+
+#define AXXIA_RIO_SYSMEM_BARRIER()	smp_mb()
+
+
+/*****************************************/
+/* *********** Byte Swapping *********** */
+/*****************************************/
+#define BSWAP(x)  __builtin_bswap32(x)     /* Use gcc built-in byte swap code */
+
+#define DESTID_INVALID ((u16)0xff)
+#define RIO_SET_DID(size, x)	(size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
+
+/*******************************/
+/* *********** ACP *********** */
+/*******************************/
+#define ACP_MAX_RESOURCES       16
+#define ACP_HW_DESC_RESOURCE    0
+#define ACP_RESOURCE_HW_DESC	0x00000100
+
+
+/*****************************************/
+/* *********** ACP/AXXIA REG *********** */
+/*****************************************/
+#define SRIO_CONF_SPACE_SIZE          0x1000
+#define SRIO_CONF_SPACE_SIZE_FIXED    0x0800
+#define SRIO_CONF_SPACE_SIZE_PAGED    0x0800
+
+#define SRIO_SPACE_SIZE         0x40000      /* Total size GRIO + RAB Spaces */
+
+/* End point models & revisions */
+#define AXXIA_DEVID_ACP34XX		0x5101000a
+#define AXXIA_DEVID_ACP25XX		0x5108000a
+#define AXXIA_DEVID_AXM55XX		0x5120000a
+#define   AXXIA_DEVREV_AXM55XX_V1_0	  0x00000000
+#define   AXXIA_DEVREV_AXM55XX_V1_1	  0x00000001
+#define AXXIA_DEVID_AXM35XX		0x5102000a
+
+/* End Point Controller Specific Registers (0x1_0000-0x1_FFFC) */
+#define EPC_REG_BASE            0x10000
+#define EPC_PNADIDCSR(x)        (EPC_REG_BASE + (0x100+((x)*0x80)))
+#define  EPC_PNADIDCSR_ADE	(1 << 31)
+#define  EPC_PNADIDCSR_ADID_SMALL(id)	((u32)((id) & 0x00ff) << 16)
+#define  EPC_PNADIDCSR_ADID_LARGE(id)	((u32)((id) & 0xffff) <<  0)
+#define EPC_PNPTAACR(x)	        (EPC_REG_BASE + (0x120+((x)*0x80)))
+#define EPC_IECSR(x)            (EPC_REG_BASE + (0x130+((x)*0x80)))
+#define  EPC_IECSR_RETE         0x80000000   /*WOCL*/
+
+/* Peripheral Bus Bridge Specific Registers (0x2_0000-0x3_FFFC) */
+#define RAB_REG_BASE            0x20000
+#define RAB_VER                 (RAB_REG_BASE + 0x00)
+#define RAB_APB_CSR_BASE        0x30
+#define RAB_APB_CSR             (RAB_REG_BASE + RAB_APB_CSR_BASE)
+#define RAB_CTRL                (RAB_REG_BASE + 0x08)
+#define RAB_STAT                (RAB_REG_BASE + 0x0c)
+#define AXI_TIMEOUT             (RAB_REG_BASE + 0x10)
+#define DME_TIMEOUT             (RAB_REG_BASE + 0x14)
+
+#define RAB_PIO_RESET           (RAB_REG_BASE + 0x18)
+#define  RAB_PIO_RESET_RPIO     0x00000100
+#define  RAB_PIO_RESET_APIO     0x00000001
+
+#define RAB_COOP_LOCK           (RAB_REG_BASE + 0x1C)
+#define RAB_IB_PW_CSR           (RAB_REG_BASE + 0x28)
+#define RAB_IB_PW_EN                    0x1UL
+#define RAB_IB_PW_NUMWORDS(csr) (((csr) & 0x001f0000) >> 16)
+
+#define RAB_IB_PW_DATA          (RAB_REG_BASE + 0x2C)
+
+#define RAB_RPIO_CTRL           (RAB_REG_BASE + 0x80)
+#define RAB_RPIO_PIO_EN                 0x1
+#define RAB_RPIO_RELAX_ORDER            0x2
+
+#define RAB_RPIO_STAT           (RAB_REG_BASE + 0x84)
+#define  RAB_RPIO_STAT_RSP_ERR  0x00000004
+#define  RAB_RPIO_STAT_ADDR_MAP 0x00000002
+#define  RAB_RPIO_STAT_DISABLED 0x00000001
+
+/* AXI PIO (outbound)*/
+#define RAB_APIO_CTRL           (RAB_REG_BASE + 0x180)
+#define RAB_APIO_CPP_EN                 0x8
+#define RAB_APIO_MAINT_MAP_EN           0x4
+#define RAB_APIO_MEM_MAP_EN             0x2
+#define RAB_APIO_PIO_EN                 0x1
+
+/* SRIO PHY Control */
+#define RAB_SRDS_CTRL0          (RAB_REG_BASE + 0x980)
+#define  RAB_SRDS_CTRL0_16B_ID   0x00000004
+
+#define RAB_SRDS_CTRL1          (RAB_REG_BASE + 0x984)
+#define  RAB_SRDS_CTRL1_RST      0x00000001
+
+#define RAB_SRDS_CTRL2          (RAB_REG_BASE + 0x988)
+#define RAB_SRDS_STAT0          (RAB_REG_BASE + 0x990)
+#define RAB_SRDS_STAT1          (RAB_REG_BASE + 0x994)
+#define  RAB_SRDS_STAT1_LINKDOWN_INT    0x80000000
+
+/* AW SMON control */
+#define RAB_SMON_CTRL0          (RAB_REG_BASE + 0x9A0)
+#define  RAB_SMON_CTRL0_INT_TM_OF       0x00200000
+#define  RAB_SMON_CTRL0_INT_CNT_OF      0x00020000
+#define  RAB_SMON_CTRL0_ENB             0x00000001
+
+
+/* register contains transaction type, target id */
+#define RAB_APIO_AMAP_CTRL(n)   (RAB_REG_BASE + (0x200 + (n * 0x10)))
+#define MRD_MWR                         (0x0)
+#define NRD_NWR                         (0x1)
+#define NRD_NWR_R                       (0x2)
+#define NRD_SWR                         (0x3)
+#define TTYPE(type)                     (((u32)(type) & 0x3) << 1)
+#define TTYPE_VAL(reg)                  (((reg) >> 1) & 0x3)
+#define TARGID(tid)                     (((u32)(tid) & 0xffff) << 16)
+#define ENABLE_AMBA                     (0x1UL)
+
+/* register contains the AXI window size */
+#define RAB_APIO_AMAP_SIZE(n)   (RAB_REG_BASE + (0x204 + (n * 0x10)))
+#define WIN_SIZE(size)                  (size & 0xfffffc00)
+
+/* register for AXI based address for window */
+#define RAB_APIO_AMAP_ABAR(n)		(RAB_REG_BASE + (0x208 + (n * 0x10)))
+#define AXI_BASE_HIGH(addr)             ((u32)(((u64)(addr) & 0x3f00000000ULL) \
+					 >> 32) << 22)
+#define AXI_BASE(addr)                  (((u32)(addr) & 0xfffffc00) >> 10)
+
+/* Register for RIO base address */
+#define RAB_APIO_AMAP_RBAR(n)   (RAB_REG_BASE + (0x20C + (n * 0x10)))
+#define RIO_ADDR_BASE(taddr)            (((u32)(taddr) & 0xfffffc00) >> 10)
+#define RIO_ADDR_OFFSET(taddr)          ((u32)(taddr) & 0x3ff)
+#define HOP_COUNT(hop_cnt)              (((u32)(hop_cnt) & 0xff) << 14)
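
A stand-alone reference check of the AXI window address encodings above
(user-space sketch; the example base address is arbitrary):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t axi = 0x0140000000ULL;	/* example 38-bit AXI window base */

	/* AXI_BASE_HIGH(): address bits [37:32] land in reg bits [27:22] */
	uint32_t hi = (uint32_t)(((axi & 0x3f00000000ULL) >> 32) << 22);
	/* AXI_BASE(): address bits [31:10] land in reg bits [21:0] */
	uint32_t lo = ((uint32_t)axi & 0xfffffc00) >> 10;

	assert(hi == (1u << 22));
	assert(lo == (0x40000000u >> 10));
	return 0;
}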
+
+
+/* Other */
+#define RAB_LFC_BLOCKED         (RAB_REG_BASE + 0x964)
+
+
+/* Interrupt registers */
+#define RAB_INTR_ENAB_GNRL      (RAB_REG_BASE + 0x40)
+/* General int enable  bits */
+#define RAB_INT_OB_DB_EN        (1 << 31)
+#define EXT_INT_OB_DB_EN        (0xff << 16)
+#define MISC_INT_EN             (1 << 6)
+#define OB_DME_INT_EN           (1 << 5)
+#define IB_DME_INT_EN           (1 << 4)
+#define RPIO_INT_EN             (1 << 1)
+#define APIO_INT_EN             (1)
+
+#define RAB_INTR_ENAB_GNRL_SET  (MISC_INT_EN | RPIO_INT_EN | \
+			 APIO_INT_EN /* | OB_DME_INT_EN | IB_DME_INT_EN */)
+
+#define RAB_INTR_STAT_GNRL      (RAB_REG_BASE + 0x60)
+/* General int status bits */
+#define RAB_INTERNAL_STAT       (1 << 31)
+#define EXT_INT_STATUS          (0xff << 16)
+#define MISC_INT                (1 << 6)
+#define OB_DME_INT              (1 << 5)
+#define IB_DME_INT              (1 << 4)
+#define RPIO_INT                (1 << 1)
+#define APIO_INT                (1)
+
+#define RAB_INTR_ENAB_APIO      (RAB_REG_BASE + 0x44)
+#define RAB_INTR_ENAB_RPIO      (RAB_REG_BASE + 0x48)
+#define RAB_INTR_ENAB_IDME      (RAB_REG_BASE + 0x54)
+#define RAB_INTR_ENAB_ODME      (RAB_REG_BASE + 0x58)
+#define RAB_INTR_ENAB_MISC      (RAB_REG_BASE + 0x5c)
+
+#define RAB_INTR_STAT_APIO      (RAB_REG_BASE + 0x64)
+
+/* Data_streaming */
+#define RAB_INTR_ENAB_ODSE      (RAB_REG_BASE + 0x2a0c)
+#define RAB_INTR_ENAB_IBDS      (RAB_REG_BASE + 0x2a04)
+#define RAB_INTR_STAT_ODSE      (RAB_REG_BASE + 0x2a18)
+#define RAB_INTR_STAT_IBSE_VSID_M (RAB_REG_BASE + 0x2a10)
+
+/* PIO int status bits */
+#define APIO_TRANS_FAILED       (1 << 8)
+#define APIO_TRANS_COMPLETE     (1)
+#define RAB_INTR_ENAB_APIO_SET  (APIO_TRANS_FAILED)
+
+#define RAB_APIO_STAT           (RAB_REG_BASE + 0x184)
+#define  RAB_APIO_STAT_RQ_ERR     0x00000040
+#define  RAB_APIO_STAT_TO_ERR     0x00000020
+#define  RAB_APIO_STAT_RSP_ERR    0x00000010
+#define  RAB_APIO_STAT_MAP_ERR    0x00000008
+#define  RAB_APIO_STAT_MAINT_DIS  0x00000004
+#define  RAB_APIO_STAT_MEM_DIS    0x00000002
+#define  RAB_APIO_STAT_DISABLED   0x00000001
+
+#define RAB_INTR_STAT_RPIO      (RAB_REG_BASE + 0x68)
+#define RPIO_TRANS_FAILED       (1 << 8)
+#define RPIO_TRANS_COMPLETE     (1)
+#define RAB_INTR_ENAB_RPIO_SET  (RPIO_TRANS_FAILED | RPIO_TRANS_COMPLETE)
+
+#define RAB_INTR_STAT_MISC      (RAB_REG_BASE + 0x7c)
+/* Misc int status bits */
+#define UNEXP_MSG_LOG           (0xff << 24)
+#define USR_INT                 (1 << 16)
+#define AMST_INT                (1 << 11)
+#define ASLV_INT                (1 << 10)
+#define LFC_INT                 (1 << 9)
+#define CO_LOCK_INT             (1 << 8)
+#define LINK_REQ_INT            (1 << 7)
+#define LL_TL_INT               (1 << 6)
+#define GRIO_INT                (1 << 5)
+#define PORT_WRITE_INT          (1 << 4)
+#define UNSP_RIO_REQ_INT        (1 << 3)
+#define UNEXP_MSG_INT           (1 << 2)
+#define OB_DB_DONE_INT          (1 << 1)
+#define IB_DB_RCV_INT           (1)
+
+/* AMBA (AXI/AHB) Master/Slave */
+#define RAB_ASLV_STAT_CMD       (RAB_REG_BASE + 0x1c0)
+#define  RAB_ASLV_STAT_CMD_USUP 0x00000001
+
+#define RAB_ASLV_STAT_ADDR      (RAB_REG_BASE + 0x1c4)
+#define RAB_AMAST_STAT          (RAB_REG_BASE + 0x1e0)
+#define  RAB_AMAST_STAT_WRTO    0x00000020
+#define  RAB_AMAST_STAT_RDTO    0x00000010
+#define  RAB_AMAST_STAT_WRDE    0x00000008
+#define  RAB_AMAST_STAT_WRSE    0x00000004
+#define  RAB_AMAST_STAT_RDDE    0x00000002
+#define  RAB_AMAST_STAT_RDSE    0x00000001
+
+
+#define MISC_FATAL (AMST_INT | ASLV_INT)
+
+#if defined(CONFIG_AXXIA_RIO_STAT)
+
+#define MISC_ERROR_INDICATION (MISC_FATAL | GRIO_INT | LL_TL_INT | \
+			       UNEXP_MSG_LOG | UNSP_RIO_REQ_INT | \
+			       UNEXP_MSG_INT)
+#define MISC_DB_EVENT (OB_DB_DONE_INT | IB_DB_RCV_INT)
+
+#else
+
+#define MISC_ERROR_INDICATION MISC_FATAL
+#define MISC_DB_EVENT IB_DB_RCV_INT
+
+#endif
+
+#define RAB_INTR_ENAB_MISC_SET  (MISC_ERROR_INDICATION | MISC_DB_EVENT)
+
+/* Inbound/Outbound int bits */
+#define RAB_INTR_ENAB_IDME_SET  (~(0UL))
+#define RAB_INTR_ENAB_ODME_SET  (0x7)
+
+
+/************************************/
+/* *********** MESSAGES *********** */
+/************************************/
+/* Outbound Doorbell */
+#define RAB_OB_DB_CSR(n)        (RAB_REG_BASE + (0x400 + (n * 0x8)))
+#define OB_DB_DEST_ID(id)               (((u32)(id) & 0xffff) << 16)
+#define OB_DB_CRF                       (1 << 6)
+#define OB_DB_PRIO(prio)                (((u32)(prio) & 0x3) << 4)
+#define OB_DB_STATUS(reg)               (((u32)(reg) & 0xe) >> 1)
+#define OB_DB_SEND                      (1)
+
+#define OB_DB_STATUS_DONE       (0)
+#define OB_DB_STATUS_RETRY      (1)
+#define OB_DB_STATUS_ERROR      (2)
+#define OB_DB_STATUS_TIMEOUT    (3)
+#define OB_DB_STATUS_PENDING    (4)
+
+#define MAX_OB_DB               (8)
+
+#define RAB_OB_DB_INFO(n)       (RAB_REG_BASE + (0x404 + (n * 0x8)))
+#define OB_DB_INFO(info)                ((u32)(info) & 0xffff)
+
+/* Inbound Doorbell */
+#define RAB_IB_DB_CSR           (RAB_REG_BASE + 0x480)
+#define IB_DB_CSR_NUM_MSG(csr)          (((u32)(csr) & 0x3f0000) >> 16)
+#define IB_DB_CSR_EN                    (1)
+
+#define RAB_IB_DB_INFO          (RAB_REG_BASE + 0x484)
+
+#define DBELL_SID(info)		(((u32)(info) & 0xffff0000) >> 16)
+#define DBELL_INF(info)		((u32)(info) & 0xffff)
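
The inbound doorbell info word packs the source ID in the upper half and the
16-bit payload in the lower half. A quick decode check (the info value is
hypothetical; macros re-stated for the stand-alone sketch):

#include <assert.h>

#define DBELL_SID(info)	(((unsigned)(info) & 0xffff0000) >> 16)
#define DBELL_INF(info)	((unsigned)(info) & 0xffff)

int main(void)
{
	unsigned info = 0x00120034;	/* example RAB_IB_DB_INFO value */

	assert(DBELL_SID(info) == 0x0012);
	assert(DBELL_INF(info) == 0x0034);
	return 0;
}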
+
+/* Messages */
+#define RAB_OB_DME_CTRL(e)      (RAB_REG_BASE + (0x500 + (0x10 * (e))))
+#define RAB_OB_DME_DESC_ADDR(e) (RAB_REG_BASE + (0x504 + (0x10 * (e))))
+#define RAB_OB_DME_STAT(e)      (RAB_REG_BASE + (0x508 + (0x10 * (e))))
+#define RAB_OB_DME_DESC(e)      (RAB_REG_BASE + (0x50C + (0x10 * (e))))
+#define RAB_OB_DME_TID_MASK     (RAB_REG_BASE + 0x5f0)
+
+#define RAB_INTR_STAT_ODME      (RAB_REG_BASE + 0x78)
+#define OB_DME_STAT_SLEEPING             (1 << 9)
+#define OB_DME_STAT_TRANS_PEND           (1 << 8)
+#define OB_DME_STAT_RESP_TO              (1 << 7)
+#define OB_DME_STAT_RESP_ERR             (1 << 6)
+#define OB_DME_STAT_DATA_TRANS_ERR       (1 << 5)
+#define OB_DME_STAT_DESC_UPD_ERR         (1 << 4)
+#define OB_DME_STAT_DESC_ERR             (1 << 3)
+#define OB_DME_STAT_DESC_FETCH_ERR       (1 << 2)
+#define OB_DME_STAT_DESC_XFER_CPLT       (1 << 1)
+#define OB_DME_STAT_DESC_CHAIN_XFER_CPLT (1)
+
+#define OB_DME_STAT_ERROR_MASK           0x000000FC
+#define OB_DME_TID_MASK                  0xFFFFFFFF
+
+#define RAB_IB_DME_CTRL(e)      (RAB_REG_BASE + (0x600 + (0x10 * (e))))
+#define   RAB_IB_DME_CTRL_XMBOX(m)           (((m) & 0x3c) << 6)
+#define   RAB_IB_DME_CTRL_MBOX(m)            (((m) & 0x03) << 6)
+#define   RAB_IB_DME_CTRL_LETTER(l)          (((l) & 0x03) << 4)
+#define RAB_IB_DME_DESC_ADDR(e) (RAB_REG_BASE + (0x604 + (0x10 * (e))))
+#define RAB_IB_DME_STAT(e)      (RAB_REG_BASE + (0x608 + (0x10 * (e))))
+#define RAB_IB_DME_DESC(e)      (RAB_REG_BASE + (0x60C + (0x10 * (e))))
+
+#define RAB_INTR_STAT_IDME      (RAB_REG_BASE + 0x74)
+#define IB_DME_STAT_SLEEPING             (1 << 9)
+#define IB_DME_STAT_TRANS_PEND           (1 << 8)
+#define IB_DME_STAT_MSG_TIMEOUT          (1 << 7)
+#define IB_DME_STAT_MSG_ERR              (1 << 6)
+#define IB_DME_STAT_DATA_TRANS_ERR       (1 << 5)
+#define IB_DME_STAT_DESC_UPDATE_ERR      (1 << 4)
+#define IB_DME_STAT_DESC_ERR             (1 << 3)
+#define IB_DME_STAT_DESC_FETCH_ERR       (1 << 2)
+#define IB_DME_STAT_DESC_XFER_CPLT       (1 << 1)
+#define IB_DME_STAT_DESC_CHAIN_XFER_CPLT (1)
+
+#define IB_DME_STAT_ERROR_MASK		0x000000FC
+
+#define DME_WAKEUP			(2)
+#define DME_ENABLE			(1)
+
+/* DME Message Descriptor Table */
+#define DESC_TABLE_W0_NDX(d)         (0x10 * (d))
+#define DESC_TABLE_W0_RAB_BASE(d)    (RAB_REG_BASE+0x10000+DESC_TABLE_W0_NDX(d))
+#define DESC_TABLE_W0(d)                (DESC_TABLE_W0_RAB_BASE(d) + 0x0)
+#define DESC_TABLE_W1(d)                (DESC_TABLE_W0_RAB_BASE(d) + 0x4)
+#define DESC_TABLE_W2(d)                (DESC_TABLE_W0_RAB_BASE(d) + 0x8)
+#define DESC_TABLE_W3(d)                (DESC_TABLE_W0_RAB_BASE(d) + 0xC)
+
+#define DESC_TABLE_W0_MEM_BASE(me, d)		\
+	(((u8 *)(me)->descriptors) + DESC_TABLE_W0_NDX(d))
+#define DESC_TABLE_W0_MEM(me, d)        (DESC_TABLE_W0_MEM_BASE(me, d) + 0x0)
+#define DESC_TABLE_W1_MEM(me, d)        (DESC_TABLE_W0_MEM_BASE(me, d) + 0x4)
+#define DESC_TABLE_W2_MEM(me, d)        (DESC_TABLE_W0_MEM_BASE(me, d) + 0x8)
+#define DESC_TABLE_W3_MEM(me, d)        (DESC_TABLE_W0_MEM_BASE(me, d) + 0xC)
+
+#define DME_DESC_DW0_SRC_DST_ID(id)     ((id) << 16)
+#define DME_DESC_DW0_GET_DST_ID(dw0)    (((dw0) >> 16) & 0xffff)
+#define DME_DESC_DW0_RIO_ERR            (1 << 11)
+#define DME_DESC_DW0_AXI_ERR            (1 << 10)
+#define DME_DESC_DW0_TIMEOUT_ERR        (1 << 9)
+#define DME_DESC_DW0_DONE               (1 << 8)
+#define DME_DESC_DW0_SZ_MASK            (3 << 4)
+#define DME_DESC_DW0_EN_INT             (1 << 3)
+#define DME_DESC_DW0_END_OF_CHAIN       (1 << 2)
+#define DME_DESC_DW0_NXT_DESC_VALID     (1 << 1)
+#define DME_DESC_DW0_VALID              (1)
+
+#define DESC_STATE_TO_ERRNO(s)		(s & DME_DESC_DW0_TIMEOUT_ERR ? \
+					 -ETIME : (s & (DME_DESC_DW0_RIO_ERR | \
+					 DME_DESC_DW0_AXI_ERR) ? -EPROTO : 0))
+
+#define DME_DESC_DW0_READY_MASK         0x00000F00
+#define DME_DESC_DW0_ERROR_MASK         0x00000E00
+#define DME_DESC_DW0_SEG(d)             ((d & DME_DESC_DW0_SZ_MASK) >> 4)
+#define DME_DESC_DW0_SIZE(s)            (s == 0 ? 512 : \
+					 (s == 1 ? 1024 :	\
+					  (s == 2 ? 2048 : 4096)))
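
A decode check for the descriptor word-0 segment-size field above (macros
re-stated with their parameters parenthesized so the sketch compiles on its
own):

#include <assert.h>

#define DME_DESC_DW0_SZ_MASK (3 << 4)
#define DME_DESC_DW0_SEG(d)  (((d) & DME_DESC_DW0_SZ_MASK) >> 4)
#define DME_DESC_DW0_SIZE(s) ((s) == 0 ? 512 : \
			      ((s) == 1 ? 1024 : ((s) == 2 ? 2048 : 4096)))

int main(void)
{
	unsigned dw0 = (2 << 4);	/* segment-size field set to 2 */

	assert(DME_DESC_DW0_SEG(dw0) == 2);
	assert(DME_DESC_DW0_SIZE(DME_DESC_DW0_SEG(dw0)) == 2048);
	return 0;
}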
+
+#define DME_DESC_DW1_PRIO(flags)        ((flags & 0x3) << 30)
+#define DME_DESC_DW1_CRF(flags)         ((flags & 0x4) << 27)
+#define DME_DESC_DW1_SEG_SIZE_256       (0x06 << 18)
+#define DME_DESC_DW1_XMBOX(m)           (((m) & 0x3c) << 2)
+#define DME_DESC_DW1_MBOX(m)            (((m) & 0x03) << 2)
+#define DME_DESC_DW1_LETTER(l)          ((l) & 0x03)
+/* Round the byte count up and shift to a double-word count */
+#define DME_DESC_DW1_MSGLEN(s)          (((((s) + 7) & ~7) >> 3) << 8)
+#define DME_DESC_DW1_MSGLEN_F(d)        (((d) >> 8) & 0x3ff)
+#define DME_DESC_DW1_MSGLEN_B(ml)       ((ml) << 3) /* double words to bytes */
+#define DME_DESC_DW1_GET_LETTER(dw1)    ((dw1) & 0x03)
+#define DME_DESC_DW1_GET_MBOX(dw1)      ((dw1 >> 2) & 0x03)
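
The message-length macros above round the byte count up to a whole number of
double words, store the double-word count in bits 17:8, and convert back on
extraction. A round-trip check (computed inline rather than via the macros):

#include <assert.h>

int main(void)
{
	unsigned bytes = 100;
	/* DME_DESC_DW1_MSGLEN(): round up to 8 bytes, store dword count */
	unsigned dw1 = ((((bytes + 7) & ~7u) >> 3) << 8);

	assert(((dw1 >> 8) & 0x3ff) == 13);	    /* DME_DESC_DW1_MSGLEN_F() */
	assert((((dw1 >> 8) & 0x3ff) << 3) == 104); /* DME_DESC_DW1_MSGLEN_B() */
	return 0;
}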
+
+/***********************************/
+/* *********** RIO REG *********** */
+/***********************************/
+#define RIO_PLTOCCSR            0x120
+#define RIO_PRTOCCSR            0x124
+#define RIO_GCCSR		0x13c
+
+#define RIO_MNT_REQ_CSR(x)      (0x140+((x)*0x20))
+#define  RIO_MNT_REQ_MASK       0x00000007
+#define  RIO_MNT_REQ_RST        0x00000003
+#define  RIO_MNT_REQ_STAT       0x00000004
+
+#define RIO_MNT_RSP_CSR(x)      (0x144+((x)*0x20))
+#define  RIO_MNT_RSP_LS         0x0000001f
+#define  RIO_MNT_RSP_AS         0x000003e0
+#define  RIO_MNT_RSP_RV         0x80000000
+
+#define RIO_ACK_STS_CSR(x)      (0x148+((x)*0x20))
+#define  RIO_ACK_STS_IA         0x1f000000
+#define  RIO_ACK_STS_OUTA       0x00001f00
+#define  RIO_ACK_STS_OBA        0x0000001f
+
+#define RIO_ESCSR(x)            (0x158+((x)*0x20))
+#define  RIO_ESCSR_I2E		 0x40000000   /*RW*/
+#define  RIO_ESCSR_OPD           0x04000000   /*WOCL*/
+#define  RIO_ESCSR_OFE           0x02000000   /*WOCL*/
+#define  RIO_ESCSR_ODE           0x01000000   /*WOCL*/
+#define  RIO_ESCSR_ORE           0x00100000   /*WOCL*/
+#define  RIO_ESCSR_OR            0x00080000   /*R*/
+#define  RIO_ESCSR_ORS           0x00040000   /*R*/
+#define  RIO_ESCSR_OEE           0x00020000   /*WOCL*/
+#define  RIO_ESCSR_OES           0x00010000   /*R--*/
+#define  RIO_ESCSR_IRS           0x00000400   /*R*/
+#define  RIO_ESCSR_IEE           0x00000200   /*WOCL*/
+#define  RIO_ESCSR_IES           0x00000100   /*R--*/
+#define  RIO_ESCSR_PWP           0x00000010   /*R*/
+#define  RIO_ESCSR_PE            0x00000004   /*WOCL*/
+#define  RIO_ESCSR_PO            0x00000002   /*R*/
+#define  RIO_ESCSR_PU            0x00000001   /*R*/
+#define  RIO_EXCSR_WOLR          (RIO_ESCSR_OPD | RIO_ESCSR_OFE | \
+				  RIO_ESCSR_ODE | RIO_ESCSR_ORE | \
+				  RIO_ESCSR_OEE | RIO_ESCSR_IEE | RIO_ESCSR_PE)
+
+#define ESCSR_FATAL (RIO_ESCSR_OFE |		\
+		     RIO_ESCSR_IES |		\
+		     RIO_ESCSR_IRS |		\
+		     RIO_ESCSR_ORS |		\
+		     RIO_ESCSR_OES)
+
+#define RIO_CCSR(x)		(0x15c+((x)*0x20))
+#define  RIO_CCSR_PW             0xc0000000   /*R*/
+#define  RIO_CCSR_IPW            0x38000000   /*R*/
+#define  RIO_CCSR_PW_MASK        0x7
+#define  RIO_CCSR_PWO_SHIFT      24
+#define  RIO_CCSR_PWO            (RIO_CCSR_PW_MASK << RIO_CCSR_PWO_SHIFT)/*R/W*/
+#define  RIO_CCSR_FORCE_LANE0    (2 << RIO_CCSR_PWO_SHIFT)
+#define  RIO_CCSR_PD             0x00800000   /*R/W*/
+#define  RIO_CCSR_OPE            0x00400000   /*R/W*/
+#define  RIO_CCSR_IPE            0x00200000   /*R/W*/
+#define  RIO_CCSR_FCP            0x00040000   /*R/W*/
+#define  RIO_CCSR_EB             0x00020000   /*R*/
+#define  RIO_CCSR_SPF            0x00000008   /*R/W*/
+#define  RIO_CCSR_PL             0x00000002   /*R/W*/
+
+#define RIO_PNPTAACR		0x10120
+
+#define AXXIA_IBDME_INTERRUPT_MODE	0x1
+#define AXXIA_IBDME_TIMER_MODE		0x2
+/*************************************/
+/* *********** Constants *********** */
+/*************************************/
+
+#define RIO_OUTB_ATMU_WINDOWS   16
+
+#define LSI_AXXIA_RIO_COOKIE	0x41734230	/* aka 'AsB0' */
+
+/***********************************/
+/* *********** STRUCTS *********** */
+/***********************************/
+struct atmu_outb {
+	void __iomem *win;
+	struct rio_atmu_regs __iomem *atmu_regs;
+	struct resource *riores;
+	int in_use;
+};
+
+struct event_regs {
+	void __iomem *win;
+	u64 phy_reset_start;
+	u64 phy_reset_size;
+	u32 reg_addr;
+	u32 reg_mask;
+	int in_use;
+};
+
+struct rio_desc {
+	u32     d0;
+	u32     d1;
+	u32     d2;
+	u32     d3;
+};
+
+struct rio_priv {
+	u32     cookie;
+
+	struct mutex api_lock;
+
+
+	struct rio_mport *mport;
+	struct device *dev;
+	int  ndx;	/* From FDT description */
+	int  port_ndx;
+	u32  devid;     /* From GRIO register */
+	u32  devrev;    /* From GRIO register */
+
+	void __iomem *regs_win_fixed;
+	void __iomem *regs_win_paged;
+
+	int maint_win_id;
+	struct atmu_outb outb_atmu[RIO_OUTB_ATMU_WINDOWS];
+	struct resource acpres[ACP_MAX_RESOURCES];
+
+	int intern_msg_desc;
+	int desc_max_entries;
+
+	/* Chip-specific DME availability */
+	int num_outb_dmes[2];	/* [0]=MSeg, [1]=SSeg */
+	int outb_dmes_in_use[2];
+	int outb_dmes[2];	/* set of defined outbound DMEs:
+				 *   [0]=MSeg, [1]=SSeg */
+	int num_inb_dmes[2];	/* [0]=MSeg, [1]=SSeg */
+	int inb_dmes_in_use[2];
+	int inb_dmes[2];	/* set of defined inbound DMEs */
+
+	struct rio_tx_dme      ob_dme_shared[DME_MAX_OB_ENGINES];
+	struct rio_tx_mbox     *ob_mbox[RIO_MAX_TX_MBOX];
+	struct rio_rx_mbox     *ib_mbox[RIO_MAX_RX_MBOX];
+	struct rio_msg_dme     *ib_dme[DME_MAX_IB_ENGINES];
+	struct rio_pw_irq *pw_data;
+	unsigned int dme_mode;
+	/* Linkdown Reset; Trigger via SRDS STAT1 */
+	struct event_regs linkdown_reset;
+
+	/* Interrupts */
+	int irq_line;
+	struct rio_irq_handler misc_irq;
+	struct rio_irq_handler linkdown_irq; /* AXM55xx+SRDS STAT1+APB2SER */
+	struct rio_irq_handler apio_irq;
+	struct rio_irq_handler rpio_irq;
+	struct rio_irq_handler ob_dme_irq;
+	struct rio_irq_handler ib_dme_irq;
+
+#ifdef CONFIG_AXXIA_RIO_STAT
+	unsigned int rpio_compl_count;
+	unsigned int rpio_failed_count;
+	unsigned int apio_compl_count;
+	unsigned int apio_failed_count;
+	unsigned int rio_pw_count;
+	unsigned int rio_pw_msg_count;
+#endif
+#ifdef CONFIG_RAPIDIO_HOTPLUG
+	/* Fatal err */
+	spinlock_t port_lock;
+	void (*port_notify_cb)(struct rio_mport *mport);
+#endif
+#ifdef CONFIG_AXXIA_RIO_DS
+	/* Data_streaming */
+	struct axxia_rio_ds_priv     ds_priv_data;
+	struct axxia_rio_ds_cfg      ds_cfg_data;
+#endif
+} ____cacheline_internodealigned_in_smp;
+
+
+/**********************************************/
+/* *********** External Functions *********** */
+/**********************************************/
+
+extern int axxia_rio_start_port(struct rio_mport *mport);
+extern void axxia_rio_set_mport_disc_mode(struct rio_mport *mport);
+extern void axxia_rio_static_win_release(struct rio_mport *mport);
+extern int axxia_rio_static_win_init(struct rio_mport *mport);
+
+extern int axxia_local_config_read(struct rio_priv *priv,
+				   u32 offset, u32 *data);
+extern int axxia_local_config_write(struct rio_priv *priv,
+				    u32 offset, u32 data);
+
+#ifdef CONFIG_RAPIDIO_HOTPLUG
+
+extern int axxia_rio_hotswap(struct rio_mport *mport, u8 flags);
+
+#endif /* CONFIG_RAPIDIO_HOTPLUG */
+
+#endif  /* _AXXIA_RIO_H_ */
diff --git a/include/linux/rio.h b/include/linux/rio.h
index a32149d..2883d52 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -25,6 +25,12 @@
 #include <linux/dmaengine.h>
 #endif
 
+#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
+typedef unsigned long long phys_t;
+#else
+typedef unsigned long phys_t;
+#endif
+
 #define RIO_NO_HOPCOUNT		-1
 #define RIO_INVALID_DESTID	0xffff
 
-- 
1.7.9.5


