Import MoCA 2.10.6.20

Files imported from MoCA2_2.10.6.20.tar and MoCA2_2.10.6.20_68xx.tar:
cp ../drivers/moca2/bmoca/3.3/bmoca.c ./3.3/bmoca.c
cp ../drivers/moca2/bmoca/3.8/bmoca.c ./3.8/bmoca.c
cp ../drivers/moca2/bmoca/bmoca-6802.c .
cp ../drivers/moca2/include/linux/bmoca.h .
cp -r ../drivers/moca2/bmoca/3.8-7145/ .
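
A quick way to re-check the copied files against the extracted tarballs
(the /tmp/moca2 extraction path below is only an assumption):

mkdir -p /tmp/moca2 && tar xf MoCA2_2.10.6.20.tar -C /tmp/moca2
diff -ru /tmp/moca2/drivers/moca2/bmoca/3.8-7145 ./3.8-7145
diff -u /tmp/moca2/drivers/moca2/include/linux/bmoca.h ./bmoca.h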

Change-Id: I73d2d4b280ccdc6af3fad802504356cdd8a1daab
diff --git a/3.3/bmoca.c b/3.3/bmoca.c
index b36c551..86b59b6 100644
--- a/3.3/bmoca.c
+++ b/3.3/bmoca.c
@@ -565,6 +565,12 @@
 #define I2C_RD(x)		MOCA_RD(x)
 #define I2C_WR(x, y)		MOCA_WR(x, y)
 
+#define moca_clk_enable   clk_enable
+#define moca_clk_disable  clk_disable
+#define moca_clk_set_rate clk_set_rate
+#define moca_clk_put      clk_put
+#define moca_clk_get      clk_get
+
 static void moca_hw_reset(struct moca_priv_data *priv)
 {
 	/* disable and clear all interrupts */
@@ -602,9 +608,9 @@
 static void moca_hw_init(struct moca_priv_data *priv, int action)
 {
 	if (action == MOCA_ENABLE && !priv->enabled) {
-		clk_enable(priv->clk);
-		clk_enable(priv->phy_clk);
-		clk_enable(priv->cpu_clk);
+		moca_clk_enable(priv->clk);
+		moca_clk_enable(priv->phy_clk);
+		moca_clk_enable(priv->cpu_clk);
 		priv->enabled = 1;
 	}
 
@@ -652,9 +658,9 @@
 
 	if (action == MOCA_DISABLE && priv->enabled) {
 		priv->enabled = 0;
-		clk_disable(priv->clk);
-		clk_disable(priv->phy_clk);
-		clk_disable(priv->cpu_clk);
+		moca_clk_disable(priv->clk);
+		moca_clk_disable(priv->phy_clk);
+		moca_clk_disable(priv->cpu_clk);
 	}
 }
 
@@ -1694,10 +1700,10 @@
 		return -EFAULT;
 
 	return 0;
-	}
+}
 
 static int moca_3450_set_reg(struct moca_priv_data *priv, unsigned int  *arg)
-	{
+{
 	struct moca_xfer x;
 	u32 val;
 
@@ -2018,6 +2024,32 @@
 	return 0;
 }
 
+static int moca_clk_ssc(struct moca_priv_data *priv,
+	unsigned int *arg)
+{
+#if defined(BCHP_CLKGEN_PLL_MOCA_PLL_SSC_MODE_CONTROL_HIGH)
+	unsigned int enable;
+
+	if (get_user(enable, arg))
+		return -EFAULT;
+
+	/* program the SSC limit, then turn spread-spectrum mode on or off */
+	BDEV_WR(BCHP_CLKGEN_PLL_MOCA_PLL_SSC_MODE_CONTROL_HIGH, 0x00005B06);
+	BDEV_WR_F(CLKGEN_PLL_MOCA_PLL_SSC_MODE_CONTROL_LOW, SSC_LIMIT, 0x10000);
+	BDEV_WR_F(CLKGEN_PLL_MOCA_PLL_SSC_MODE_CONTROL_LOW, SSC_MODE,
+		  enable ? 0x1 : 0x0);
+#endif
+
+	return 0;
+}
 
 static long moca_file_ioctl(struct file *file, unsigned int cmd,
 	unsigned long arg)
@@ -2044,7 +2076,7 @@
 			&pd->macaddr_lo);
 #endif
 
-		clk_set_rate(priv->phy_clk, DEFAULT_PHY_CLOCK);
+		moca_clk_set_rate(priv->phy_clk, DEFAULT_PHY_CLOCK);
 
 		if (copy_from_user(&start, (void __user *)arg, sizeof(start)))
 			ret = -EFAULT;
@@ -2102,13 +2134,13 @@
 		if (!priv->cpu_clk)
 			ret = -EIO;
 		else
-			ret = clk_set_rate(priv->cpu_clk, (unsigned int)arg);
+			ret = moca_clk_set_rate(priv->cpu_clk, (unsigned int)arg);
 		break;
 	case MOCA_IOCTL_SET_PHY_RATE:
 		if (!priv->phy_clk)
 			ret = -EIO;
 		else
-			ret = clk_set_rate(priv->phy_clk, (unsigned int)arg);
+			ret = moca_clk_set_rate(priv->phy_clk, (unsigned int)arg);
 		break;
 	case MOCA_IOCTL_GET_3450_REG:
 		ret = moca_3450_get_reg(priv, (unsigned int *)arg);
@@ -2116,6 +2148,10 @@
 	case MOCA_IOCTL_SET_3450_REG:
 		ret = moca_3450_set_reg(priv, (unsigned int *)arg);
 		break;
+	case MOCA_IOCTL_CLK_SSC:
+		ret = moca_clk_ssc(priv, (unsigned int *)arg);
+		break;
+
 	}
 	mutex_unlock(&priv->dev_mutex);
 
@@ -2372,10 +2408,10 @@
 	priv->pdev = pdev;
 	priv->start_time = jiffies;
 
-	priv->clk = clk_get(&pdev->dev, "moca");
+	priv->clk = moca_clk_get(&pdev->dev, "moca");
 
-	priv->cpu_clk = clk_get(&pdev->dev, "moca-cpu");
-	priv->phy_clk = clk_get(&pdev->dev, "moca-phy");
+	priv->cpu_clk = moca_clk_get(&pdev->dev, "moca-cpu");
+	priv->phy_clk = moca_clk_get(&pdev->dev, "moca-phy");
 
 	priv->hw_rev = pd->hw_rev;
 
@@ -2527,9 +2563,9 @@
 	iounmap(priv->base);
 	kfree(priv);
 
-	clk_put(clk);
-	clk_put(phy_clk);
-	clk_put(cpu_clk);
+	moca_clk_put(clk);
+	moca_clk_put(phy_clk);
+	moca_clk_put(cpu_clk);
 
 	return 0;
 }
diff --git a/3.8-7145/bmoca.c b/3.8-7145/bmoca.c
new file mode 100644
index 0000000..8b1f7b2
--- /dev/null
+++ b/3.8-7145/bmoca.c
@@ -0,0 +1,2775 @@
+/*
+ * Copyright (C) 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt)            KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <linux/scatterlist.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/clk-brcmstb.h>
+#include <linux/netdevice.h>
+#include <linux/suspend.h>
+
+#define DRV_VERSION		0x00040000
+#define DRV_BUILD_NUMBER	0x20110831
+
+#if defined(CONFIG_BRCMSTB)
+#define MOCA6816		0
+#include <linux/bmoca.h>
+#elif defined(DSL_MOCA)
+#define MOCA6816		1
+#include "bmoca.h"
+#include <boardparms.h>
+#include <bcm3450.h>
+#include <linux/netdevice.h>
+#else
+#define MOCA6816		1
+#include <linux/bmoca.h>
+#endif
+
+#if defined(CONFIG_BRCMSTB)
+#include <linux/brcmstb/brcmstb.h>
+#endif
+
+#define MOCA_ENABLE		1
+#define MOCA_DISABLE		0
+
+#define OFF_PKT_REINIT_MEM	0x00a08000
+#define PKT_REINIT_MEM_SIZE	(32 * 1024)
+#define PKT_REINIT_MEM_END	(OFF_PKT_REINIT_MEM  + PKT_REINIT_MEM_SIZE)
+
+/* The mailbox layout is different for MoCA 2.0 compared to
+   MoCA 1.1 */
+
+/* MoCA 1.1 mailbox layout */
+#define HOST_REQ_SIZE_11        304
+#define HOST_RESP_SIZE_11       256
+#define CORE_REQ_SIZE_11        400
+#define CORE_RESP_SIZE_11       64
+
+/* MoCA 1.1 offsets from the mailbox pointer */
+#define HOST_REQ_OFFSET_11      0
+#define HOST_RESP_OFFSET_11     (HOST_REQ_OFFSET_11 + HOST_REQ_SIZE_11)
+#define CORE_REQ_OFFSET_11      (HOST_RESP_OFFSET_11 + HOST_RESP_SIZE_11)
+#define CORE_RESP_OFFSET_11     (CORE_REQ_OFFSET_11 + CORE_REQ_SIZE_11)
+
+/* MoCA 2.0 mailbox layout */
+#define HOST_REQ_SIZE_20        512
+#define HOST_RESP_SIZE_20       512
+#define CORE_REQ_SIZE_20        512
+#define CORE_RESP_SIZE_20       512
+
+/* MoCA 2.0 offsets from the mailbox pointer */
+#define HOST_REQ_OFFSET_20      0
+#define HOST_RESP_OFFSET_20     (HOST_REQ_OFFSET_20 + 0)
+#define CORE_REQ_OFFSET_20      (HOST_RESP_OFFSET_20 + HOST_RESP_SIZE_20)
+#define CORE_RESP_OFFSET_20     (CORE_REQ_OFFSET_20 + 0)
+
+#define HOST_REQ_SIZE_MAX       HOST_REQ_SIZE_20
+#define CORE_REQ_SIZE_MAX       CORE_REQ_SIZE_20
+#define CORE_RESP_SIZE_MAX      CORE_RESP_SIZE_20
+
+/* local H2M, M2H buffers */
+#define NUM_CORE_MSG		32
+#define NUM_HOST_MSG		8
+
+#define FW_CHUNK_SIZE		4096
+#define MAX_BL_CHUNKS		8
+#define MAX_FW_SIZE		(1024 * 1024)
+#define MAX_FW_PAGES		((MAX_FW_SIZE >> PAGE_SHIFT) + 1)
+#define MAX_LAB_PRINTF		104
+
+#ifdef __LITTLE_ENDIAN
+#define M2M_WRITE		(BIT(31) | BIT(27) | BIT(28))
+#define M2M_READ		(BIT(30) | BIT(27) | BIT(28))
+#else
+#define M2M_WRITE		(BIT(31) | BIT(27))
+#define M2M_READ		(BIT(30) | BIT(27))
+#endif
+
+#define RESET_HIGH_CPU		BIT(0)
+#define RESET_MOCA_SYS		BIT(1)
+#define RESET_LOW_CPU		BIT(2)
+#define RESET_GMII		BIT(3)
+#define RESET_PHY_0		BIT(4)
+#define RESET_PHY_1		BIT(5)
+#define DISABLE_CLOCKS		BIT(7)
+#define DISABLE_PHY_0_CLOCK	BIT(8)
+#define DISABLE_PHY_1_CLOCK	BIT(9)
+
+#define M2M_TIMEOUT_MS		10
+
+#define NO_FLUSH_IRQ		0
+#define FLUSH_IRQ		1
+#define FLUSH_DMA_ONLY		2
+#define FLUSH_REQRESP_ONLY	3
+
+#define DEFAULT_PHY_CLOCK	(300 * 1000000)
+
+/* DMA buffers may not share a cache line with anything else */
+#define __DMA_ALIGN__		__aligned(L1_CACHE_BYTES)
+
+struct moca_host_msg {
+	u32			data[HOST_REQ_SIZE_MAX / 4] __DMA_ALIGN__;
+	struct list_head	chain __DMA_ALIGN__;
+	u32			len;
+};
+
+struct moca_core_msg {
+	u32			data[CORE_REQ_SIZE_MAX / 4] __DMA_ALIGN__;
+	struct list_head	chain __DMA_ALIGN__;
+	u32			len;
+};
+
+struct moca_regs {
+	unsigned int		data_mem_offset;
+	unsigned int		data_mem_size;
+	unsigned int		cntl_mem_size;
+	unsigned int		cntl_mem_offset;
+	unsigned int		gp0_offset;
+	unsigned int		gp1_offset;
+	unsigned int		ringbell_offset;
+	unsigned int		l2_status_offset;
+	unsigned int		l2_clear_offset;
+	unsigned int		l2_mask_set_offset;
+	unsigned int		l2_mask_clear_offset;
+	unsigned int		sw_reset_offset;
+	unsigned int		led_ctrl_offset;
+	unsigned int		m2m_src_offset;
+	unsigned int		m2m_dst_offset;
+	unsigned int		m2m_cmd_offset;
+	unsigned int		m2m_status_offset;
+	unsigned int		moca2host_mmp_inbox_0_offset;
+	unsigned int		moca2host_mmp_inbox_1_offset;
+	unsigned int		moca2host_mmp_inbox_2_offset;
+	unsigned int		h2m_resp_bit[2]; /* indexed by cpu */
+	unsigned int		h2m_req_bit[2]; /* indexed by cpu */
+	unsigned int		sideband_gmii_fc_offset;
+};
+
+struct moca_priv_data {
+	struct platform_device	*pdev;
+	struct device		*dev;
+
+	unsigned int		minor;
+	int			irq;
+	struct work_struct	work;
+	void __iomem		*base;
+	void __iomem		*i2c_base;
+	struct platform_device	*enet_pdev;
+
+	unsigned int		mbx_offset[2]; /* indexed by MoCA cpu */
+	struct page		*fw_pages[MAX_FW_PAGES];
+	struct scatterlist	fw_sg[MAX_FW_PAGES];
+	struct completion	copy_complete;
+	struct completion	chunk_complete;
+
+	struct list_head	host_msg_free_list;
+	struct list_head	host_msg_pend_list;
+	struct moca_host_msg	host_msg_queue[NUM_HOST_MSG] __DMA_ALIGN__;
+	wait_queue_head_t	host_msg_wq;
+
+	struct list_head	core_msg_free_list;
+	struct list_head	core_msg_pend_list;
+	u32		core_resp_buf[CORE_RESP_SIZE_MAX / 4] __DMA_ALIGN__;
+	struct moca_core_msg	core_msg_queue[NUM_CORE_MSG] __DMA_ALIGN__;
+	struct moca_core_msg	core_msg_temp __DMA_ALIGN__;
+	wait_queue_head_t	core_msg_wq;
+
+	spinlock_t		list_lock;
+	spinlock_t		clock_lock;
+	spinlock_t		irq_status_lock;
+	struct mutex		dev_mutex;
+	struct mutex		copy_mutex;
+	struct mutex		moca_i2c_mutex;
+	int			host_mbx_busy;
+	int			host_resp_pending;
+	int			core_req_pending;
+	int			assert_pending;
+	int			wdt_pending;
+
+	int			enabled;
+	int			running;
+	int			wol_enabled;
+	struct clk		*clk;
+	struct clk		*phy_clk;
+	struct clk		*cpu_clk;
+
+	int			refcount;
+	unsigned long		start_time;
+	dma_addr_t		tpcap_buf_phys;
+
+	unsigned int		bonded_mode;
+	unsigned int		phy_freq;
+
+	unsigned int		hw_rev;
+
+	const struct moca_regs	*regs;
+
+	/* MMP Parameters */
+	unsigned int		mmp_20;
+	unsigned int		host_req_size;
+	unsigned int		host_resp_size;
+	unsigned int		core_req_size;
+	unsigned int		core_resp_size;
+	unsigned int		host_req_offset;
+	unsigned int		host_resp_offset;
+	unsigned int		core_req_offset;
+	unsigned int		core_resp_offset;
+
+	/* for user space suspend/resume notifications */
+	struct notifier_block	pm_notifier;
+	enum moca_pm_states	state;
+	struct completion	suspend_complete;
+};
+
+static const struct moca_regs regs_11_plus = {
+	.data_mem_offset		= 0,
+	.data_mem_size			= (256 * 1024),
+	.cntl_mem_offset		= 0x00040000,
+	.cntl_mem_size			= (128 * 1024),
+	.gp0_offset			= 0x000a2050,
+	.gp1_offset			= 0x000a2054,
+	.ringbell_offset		= 0x000a2060,
+	.l2_status_offset		= 0x000a2080,
+	.l2_clear_offset		= 0x000a2088,
+	.l2_mask_set_offset		= 0x000a2090,
+	.l2_mask_clear_offset		= 0x000a2094,
+	.sw_reset_offset		= 0x000a2040,
+	.led_ctrl_offset		= 0x000a204c,
+	.m2m_src_offset			= 0x000a2000,
+	.m2m_dst_offset			= 0x000a2004,
+	.m2m_cmd_offset			= 0x000a2008,
+	.m2m_status_offset		= 0x000a200c,
+	.h2m_resp_bit[1]		= 0x1,
+	.h2m_req_bit[1]			= 0x2,
+	.sideband_gmii_fc_offset	= 0x000a1420
+};
+
+static const struct moca_regs regs_11_lite = {
+	.data_mem_offset		= 0,
+	.data_mem_size			= (96 * 1024),
+	.cntl_mem_offset		= 0x0004c000,
+	.cntl_mem_size			= (80 * 1024),
+	.gp0_offset			= 0x000a2050,
+	.gp1_offset			= 0x000a2054,
+	.ringbell_offset		= 0x000a2060,
+	.l2_status_offset		= 0x000a2080,
+	.l2_clear_offset		= 0x000a2088,
+	.l2_mask_set_offset		= 0x000a2090,
+	.l2_mask_clear_offset		= 0x000a2094,
+	.sw_reset_offset		= 0x000a2040,
+	.led_ctrl_offset		= 0x000a204c,
+	.m2m_src_offset			= 0x000a2000,
+	.m2m_dst_offset			= 0x000a2004,
+	.m2m_cmd_offset			= 0x000a2008,
+	.m2m_status_offset		= 0x000a200c,
+	.h2m_resp_bit[1]		= 0x1,
+	.h2m_req_bit[1]			= 0x2,
+	.sideband_gmii_fc_offset	= 0x000a1420
+};
+
+static const struct moca_regs regs_11 = {
+	.data_mem_offset		= 0,
+	.data_mem_size			= (256 * 1024),
+	.cntl_mem_offset		= 0x0004c000,
+	.cntl_mem_size			= (80 * 1024),
+	.gp0_offset			= 0x000a2050,
+	.gp1_offset			= 0x000a2054,
+	.ringbell_offset		= 0x000a2060,
+	.l2_status_offset		= 0x000a2080,
+	.l2_clear_offset		= 0x000a2088,
+	.l2_mask_set_offset		= 0x000a2090,
+	.l2_mask_clear_offset		= 0x000a2094,
+	.sw_reset_offset		= 0x000a2040,
+	.led_ctrl_offset		= 0x000a204c,
+	.m2m_src_offset			= 0x000a2000,
+	.m2m_dst_offset			= 0x000a2004,
+	.m2m_cmd_offset			= 0x000a2008,
+	.m2m_status_offset		= 0x000a200c,
+	.h2m_resp_bit[1]		= 0x1,
+	.h2m_req_bit[1]			= 0x2,
+	.sideband_gmii_fc_offset	= 0x000a1420
+};
+
+static const struct moca_regs regs_20 = {
+	.data_mem_offset		= 0,
+	.data_mem_size			= (288 * 1024),
+	.cntl_mem_offset		= 0x00120000,
+	.cntl_mem_size			= (384 * 1024),
+	.gp0_offset			= 0,
+	.gp1_offset			= 0,
+	.ringbell_offset		= 0x001ffd0c,
+	.l2_status_offset		= 0x001ffc40,
+	.l2_clear_offset		= 0x001ffc48,
+	.l2_mask_set_offset		= 0x001ffc50,
+	.l2_mask_clear_offset		= 0x001ffc54,
+	.sw_reset_offset		= 0x001ffd00,
+	.led_ctrl_offset		= 0,
+	.m2m_src_offset			= 0x001ffc00,
+	.m2m_dst_offset			= 0x001ffc04,
+	.m2m_cmd_offset			= 0x001ffc08,
+	.m2m_status_offset		= 0x001ffc0c,
+	.moca2host_mmp_inbox_0_offset	= 0x001ffd58,
+	.moca2host_mmp_inbox_1_offset	= 0x001ffd5c,
+	.moca2host_mmp_inbox_2_offset	= 0x001ffd60,
+	.h2m_resp_bit[1]		= 0x10,
+	.h2m_req_bit[1]			= 0x20,
+	.h2m_resp_bit[0]		= 0x1,
+	.h2m_req_bit[0]			= 0x2,
+	.sideband_gmii_fc_offset	= 0x001fec18
+};
+
+#define MOCA_FW_MAGIC		0x4d6f4341
+
+struct moca_fw_hdr {
+	uint32_t		jump[2];
+	uint32_t		length;
+	uint32_t		cpuid;
+	uint32_t		magic;
+	uint32_t		hw_rev;
+	uint32_t		bl_chunks;
+	uint32_t		res1;
+};
+
+struct bsc_regs {
+	u32			chip_address;
+	u32			data_in[8];
+	u32			cnt_reg;
+	u32			ctl_reg;
+	u32			iic_enable;
+	u32			data_out[8];
+	u32			ctlhi_reg;
+	u32			scl_param;
+};
+
+static const char * const __maybe_unused moca_state_string[] = {
+	[MOCA_ACTIVE] = "active",
+	[MOCA_SUSPENDING] = "suspending",
+	[MOCA_SUSPENDING_WAITING_ACK] = "suspending waiting for ACK",
+	[MOCA_SUSPENDING_GOT_ACK] = "suspending got ACK",
+	[MOCA_SUSPENDED] = "suspended",
+	[MOCA_RESUMING] = "resuming",
+};
+
+/* support for multiple MoCA devices */
+#define NUM_MINORS		8
+static struct moca_priv_data *minor_tbl[NUM_MINORS];
+static struct class *moca_class;
+
+/* character major device number */
+#define MOCA_MAJOR		234
+#define MOCA_CLASS		"bmoca"
+
+#define M2H_RESP		BIT(0)
+#define M2H_REQ			BIT(1)
+#define M2H_ASSERT		BIT(2)
+#define M2H_NEXTCHUNK		BIT(3)
+#define M2H_NEXTCHUNK_CPU0	BIT(4)
+#define M2H_WDT_CPU0		BIT(6)
+#define M2H_WDT_CPU1		BIT(10)
+#define M2H_DMA			BIT(11)
+
+#define M2H_RESP_CPU0		BIT(13)
+#define M2H_REQ_CPU0		BIT(14)
+#define M2H_ASSERT_CPU0		BIT(15)
+
+/* does this word contain a NUL byte (i.e. end of string)? */
+#define HAS0(x)			((((x) & 0xff) == 0) || \
+				 (((x) & 0xff00) == 0) || \
+				 (((x) & 0xff0000) == 0) || \
+				 (((x) & 0xff000000) == 0))
+
+#define MOCA_SET(x, y)	 MOCA_WR(x, MOCA_RD(x) | (y))
+#define MOCA_UNSET(x, y) MOCA_WR(x, MOCA_RD(x) & ~(y))
+
+static void moca_3450_write_i2c(struct moca_priv_data *priv, u8 addr,
+				u32 data);
+static u32 moca_3450_read_i2c(struct moca_priv_data *priv, u8 addr);
+static int moca_get_mbx_offset(struct moca_priv_data *priv);
+
+#define INRANGE(x, a, b)	(((x) >= (a)) && ((x) < (b)))
+
+static inline int moca_range_ok(struct moca_priv_data *priv,
+	unsigned long offset, unsigned long len)
+{
+	const struct moca_regs *r = priv->regs;
+	unsigned long lastad = offset + len - 1;
+
+	if (lastad < offset)
+		return -EINVAL;
+
+	if (INRANGE(offset, r->cntl_mem_offset,
+		    r->cntl_mem_offset + r->cntl_mem_size) &&
+	    INRANGE(lastad, r->cntl_mem_offset,
+		    r->cntl_mem_offset + r->cntl_mem_size))
+		return 0;
+
+	if (INRANGE(offset, r->data_mem_offset,
+		    r->data_mem_offset + r->data_mem_size) &&
+	    INRANGE(lastad, r->data_mem_offset,
+		    r->data_mem_offset + r->data_mem_size))
+		return 0;
+
+	if (INRANGE(offset, OFF_PKT_REINIT_MEM, PKT_REINIT_MEM_END) &&
+	    INRANGE(lastad, OFF_PKT_REINIT_MEM, PKT_REINIT_MEM_END))
+		return 0;
+
+	return -EINVAL;
+}
+
+static void moca_mmp_init(struct moca_priv_data *priv, int is20)
+{
+	if (is20) {
+		priv->host_req_size    = HOST_REQ_SIZE_20;
+		priv->host_resp_size   = HOST_RESP_SIZE_20;
+		priv->core_req_size    = CORE_REQ_SIZE_20;
+		priv->core_resp_size   = CORE_RESP_SIZE_20;
+		priv->host_req_offset  = HOST_REQ_OFFSET_20;
+		priv->host_resp_offset = HOST_RESP_OFFSET_20;
+		priv->core_req_offset  = CORE_REQ_OFFSET_20;
+		priv->core_resp_offset = CORE_RESP_OFFSET_20;
+		priv->mmp_20 = 1;
+	} else {
+		priv->host_req_size    = HOST_REQ_SIZE_11;
+		priv->host_resp_size   = HOST_RESP_SIZE_11;
+		priv->core_req_size    = CORE_REQ_SIZE_11;
+		priv->core_resp_size   = CORE_RESP_SIZE_11;
+		priv->host_req_offset  = HOST_REQ_OFFSET_11;
+		priv->host_resp_offset = HOST_RESP_OFFSET_11;
+		priv->core_req_offset  = CORE_REQ_OFFSET_11;
+		priv->core_resp_offset = CORE_RESP_OFFSET_11;
+		priv->mmp_20 = 0;
+	}
+}
+
+static int moca_is_20(struct moca_priv_data *priv)
+{
+	return (priv->hw_rev & MOCA_PROTVER_MASK) == MOCA_PROTVER_20;
+}
+
+#ifdef CONFIG_BRCM_MOCA_BUILTIN_FW
+#error Not supported in this version
+#else
+static const char *bmoca_fw_image;
+#endif
+
+/*
+ * LOW-LEVEL DEVICE OPERATIONS
+ */
+
+#define MOCA_RD(x)		__raw_readl((void __iomem *)(x))
+#define MOCA_WR(x, y)		__raw_writel((y), (void __iomem *)(x))
+
+#define I2C_RD(x)		MOCA_RD(x)
+#define I2C_WR(x, y)		MOCA_WR(x, y)
+
+#define moca_clk_enable   clk_prepare_enable
+#define moca_clk_disable  clk_disable_unprepare
+#define moca_clk_set_rate clk_set_rate
+#define moca_clk_put      clk_put
+#define moca_clk_get      clk_get
+
+static void moca_hw_reset(struct moca_priv_data *priv)
+{
+	const struct moca_regs *r = priv->regs;
+
+	/* disable and clear all interrupts */
+	MOCA_WR(priv->base + r->l2_mask_set_offset, 0xffffffff);
+	MOCA_RD(priv->base + r->l2_mask_set_offset);
+
+	/* assert resets */
+
+	/* reset CPU first, both CPUs for MoCA 20 HW */
+	MOCA_SET(priv->base + r->sw_reset_offset, RESET_HIGH_CPU |
+		 (moca_is_20(priv) ? RESET_LOW_CPU : 0));
+	MOCA_RD(priv->base + r->sw_reset_offset);
+
+	udelay(20);
+
+	/* reset everything else except clocks */
+	MOCA_SET(priv->base + r->sw_reset_offset,
+		 ~(RESET_GMII | DISABLE_CLOCKS));
+	MOCA_RD(priv->base + r->sw_reset_offset);
+
+	/* disable clocks */
+	MOCA_SET(priv->base + r->sw_reset_offset, ~RESET_GMII);
+	MOCA_RD(priv->base + r->sw_reset_offset);
+
+	MOCA_WR(priv->base + r->l2_clear_offset, 0xffffffff);
+	MOCA_RD(priv->base + r->l2_clear_offset);
+}
+
+/* called any time we start/restart/stop MoCA */
+static void moca_hw_init(struct moca_priv_data *priv, int action)
+{
+	const struct moca_regs *r = priv->regs;
+	int clk_status = 0;
+
+	if (action == MOCA_ENABLE && !priv->enabled) {
+		clk_status = moca_clk_enable(priv->clk);
+		if (clk_status != 0) {
+			dev_err(priv->dev, "moca clk enable failed\n");
+			goto clk_err_chk;
+		}
+
+		clk_status = moca_clk_enable(priv->phy_clk);
+		if (clk_status != 0) {
+			dev_err(priv->dev, "moca phy clk enable failed\n");
+			goto clk_err_chk;
+		}
+		clk_status = moca_clk_enable(priv->cpu_clk);
+		if (clk_status != 0)
+			dev_err(priv->dev, "moca cpu clk enable failed\n");
+
+clk_err_chk:
+		priv->enabled = clk_status ? 0 : 1;
+	}
+
+	/* clock not enabled, register accesses will fail with bus error */
+	if (!priv->enabled)
+		return;
+
+	moca_hw_reset(priv);
+	udelay(1);
+
+	if (action == MOCA_ENABLE) {
+		/* deassert moca_sys_reset and clock */
+		MOCA_UNSET(priv->base + r->sw_reset_offset,
+			   RESET_MOCA_SYS | DISABLE_CLOCKS);
+
+		if (priv->hw_rev >= HWREV_MOCA_20_GEN22) {
+			/* Take PHY0 out of reset and enable clock */
+			MOCA_UNSET(priv->base + r->sw_reset_offset,
+				   RESET_PHY_0 | DISABLE_PHY_0_CLOCK);
+
+			if (priv->bonded_mode) {
+				/* Take PHY1 out of reset and enable clock */
+				MOCA_UNSET(priv->base + r->sw_reset_offset,
+					   RESET_PHY_1 | DISABLE_PHY_1_CLOCK);
+			}
+		}
+		MOCA_RD(priv->base + r->sw_reset_offset);
+	}
+
+	if (!moca_is_20(priv)) {
+		/* clear junk out of GP0/GP1 */
+		MOCA_WR(priv->base + r->gp0_offset, 0xffffffff);
+		MOCA_WR(priv->base + r->gp1_offset, 0x0);
+		/* set up activity LED for 50% duty cycle */
+		MOCA_WR(priv->base + r->led_ctrl_offset, 0x40004000);
+	}
+
+	/* enable DMA completion interrupts */
+	MOCA_WR(priv->base + r->ringbell_offset, 0);
+	MOCA_WR(priv->base + r->l2_mask_clear_offset, M2H_DMA);
+	MOCA_RD(priv->base + r->l2_mask_clear_offset);
+
+	if (action == MOCA_DISABLE && priv->enabled) {
+		priv->enabled = 0;
+		moca_clk_disable(priv->cpu_clk);
+		moca_clk_disable(priv->phy_clk);
+		moca_clk_disable(priv->clk);
+	}
+}
+
+static void moca_ringbell(struct moca_priv_data *priv, u32 mask)
+{
+	const struct moca_regs *r = priv->regs;
+
+	MOCA_WR(priv->base + r->ringbell_offset, mask);
+	MOCA_RD(priv->base + r->ringbell_offset);
+}
+
+static u32 moca_irq_status(struct moca_priv_data *priv, int flush)
+{
+	const struct moca_regs *r = priv->regs;
+	u32 stat, dma_mask = M2H_DMA | M2H_NEXTCHUNK;
+	unsigned long flags;
+
+	if (moca_is_20(priv))
+		dma_mask |= M2H_NEXTCHUNK_CPU0;
+
+	spin_lock_irqsave(&priv->irq_status_lock, flags);
+
+	stat = MOCA_RD(priv->base + priv->regs->l2_status_offset);
+
+	if (flush == FLUSH_IRQ) {
+		MOCA_WR(priv->base + r->l2_clear_offset, stat);
+		MOCA_RD(priv->base + r->l2_clear_offset);
+	}
+	if (flush == FLUSH_DMA_ONLY) {
+		MOCA_WR(priv->base + r->l2_clear_offset,
+			stat & dma_mask);
+		MOCA_RD(priv->base + r->l2_clear_offset);
+	}
+	if (flush == FLUSH_REQRESP_ONLY) {
+		MOCA_WR(priv->base + r->l2_clear_offset,
+			stat & (M2H_RESP | M2H_REQ |
+			M2H_RESP_CPU0 | M2H_REQ_CPU0));
+		MOCA_RD(priv->base + r->l2_clear_offset);
+	}
+
+	spin_unlock_irqrestore(&priv->irq_status_lock, flags);
+
+	return stat;
+}
+
+static void moca_enable_irq(struct moca_priv_data *priv)
+{
+	const struct moca_regs *r = priv->regs;
+
+	/* unmask everything */
+	u32 mask = M2H_REQ | M2H_RESP | M2H_ASSERT | M2H_WDT_CPU1 |
+		M2H_NEXTCHUNK | M2H_DMA;
+
+	if (moca_is_20(priv))
+		mask |= M2H_WDT_CPU0 | M2H_NEXTCHUNK_CPU0 |
+			M2H_REQ_CPU0 | M2H_RESP_CPU0 | M2H_ASSERT_CPU0;
+
+	MOCA_WR(priv->base + r->l2_mask_clear_offset, mask);
+	MOCA_RD(priv->base + r->l2_mask_clear_offset);
+}
+
+static void moca_disable_irq(struct moca_priv_data *priv)
+{
+	const struct moca_regs *r = priv->regs;
+
+	/* mask everything except DMA completions */
+	u32 mask = M2H_REQ | M2H_RESP | M2H_ASSERT | M2H_WDT_CPU1 |
+		M2H_NEXTCHUNK;
+
+	if (moca_is_20(priv))
+		mask |= M2H_WDT_CPU0 | M2H_NEXTCHUNK_CPU0 |
+			M2H_REQ_CPU0 | M2H_RESP_CPU0 | M2H_ASSERT_CPU0;
+
+	MOCA_WR(priv->base + r->l2_mask_set_offset, mask);
+	MOCA_RD(priv->base + r->l2_mask_set_offset);
+}
+
+static u32 moca_start_mips(struct moca_priv_data *priv, u32 cpu)
+{
+	const struct moca_regs *r = priv->regs;
+
+	if (moca_is_20(priv)) {
+		if (cpu == 1)
+			MOCA_UNSET(priv->base + r->sw_reset_offset,
+				   RESET_HIGH_CPU);
+		else {
+			moca_mmp_init(priv, 1);
+			MOCA_UNSET(priv->base + r->sw_reset_offset,
+				   RESET_LOW_CPU);
+		}
+	} else
+		MOCA_UNSET(priv->base + r->sw_reset_offset, RESET_HIGH_CPU);
+	MOCA_RD(priv->base + r->sw_reset_offset);
+	return 0;
+}
+
+static void moca_m2m_xfer(struct moca_priv_data *priv,
+	u32 dst, u32 src, u32 ctl)
+{
+	const struct moca_regs *r = priv->regs;
+	u32 status;
+
+	MOCA_WR(priv->base + r->m2m_src_offset, src);
+	MOCA_WR(priv->base + r->m2m_dst_offset, dst);
+	MOCA_WR(priv->base + r->m2m_status_offset, 0);
+	MOCA_RD(priv->base + r->m2m_status_offset);
+	MOCA_WR(priv->base + r->m2m_cmd_offset, ctl);
+
+	if (wait_for_completion_timeout(&priv->copy_complete,
+		1000 * M2M_TIMEOUT_MS) <= 0) {
+		dev_warn(priv->dev, "DMA interrupt timed out, status %x\n",
+			 moca_irq_status(priv, NO_FLUSH_IRQ));
+	}
+
+	status = MOCA_RD(priv->base + r->m2m_status_offset);
+
+	if (status & (3 << 29))
+		dev_warn(priv->dev, "bad status %08x (s/d/c %08x %08x %08x)\n",
+			 status, src, dst, ctl);
+}
+
+static void moca_write_mem(struct moca_priv_data *priv,
+	u32 dst_offset, void *src, unsigned int len)
+{
+	dma_addr_t pa;
+
+	if (moca_range_ok(priv, dst_offset, len) < 0) {
+		dev_warn(priv->dev, "copy past end of cntl memory: %08x\n",
+			 dst_offset);
+		return;
+	}
+
+	pa = dma_map_single(&priv->pdev->dev, src, len, DMA_TO_DEVICE);
+	moca_m2m_xfer(priv, dst_offset + priv->regs->data_mem_offset, (u32)pa,
+		len | M2M_WRITE);
+	dma_unmap_single(&priv->pdev->dev, pa, len, DMA_TO_DEVICE);
+}
+
+static void moca_read_mem(struct moca_priv_data *priv,
+	void *dst, u32 src_offset, unsigned int len)
+{
+	int i;
+
+	if (moca_range_ok(priv, src_offset, len) < 0) {
+		dev_warn(priv->dev, "copy past end of cntl memory: %08x\n",
+			 src_offset);
+		return;
+	}
+
+	for (i = 0; i < len; i += 4)
+		DEV_WR(dst + i, cpu_to_be32(
+			MOCA_RD(priv->base + src_offset +
+				priv->regs->data_mem_offset + i)));
+}
+
+static void moca_write_sg(struct moca_priv_data *priv,
+	u32 dst_offset, struct scatterlist *sg, int nents)
+{
+	int j;
+	uintptr_t addr = priv->regs->data_mem_offset + dst_offset;
+
+	dma_map_sg(&priv->pdev->dev, sg, nents, DMA_TO_DEVICE);
+
+	for (j = 0; j < nents; j++) {
+		moca_m2m_xfer(priv, addr, (u32)sg[j].dma_address,
+			sg[j].length | M2M_WRITE);
+
+		addr += sg[j].length;
+	}
+
+	dma_unmap_sg(&priv->pdev->dev, sg, nents, DMA_TO_DEVICE);
+}
+
+static inline void moca_read_sg(struct moca_priv_data *priv,
+	u32 src_offset, struct scatterlist *sg, int nents)
+{
+	int j;
+	uintptr_t addr = priv->regs->data_mem_offset + src_offset;
+
+	dma_map_sg(&priv->pdev->dev, sg, nents, DMA_FROM_DEVICE);
+
+	for (j = 0; j < nents; j++) {
+		moca_m2m_xfer(priv, (u32)sg[j].dma_address, addr,
+			sg[j].length | M2M_READ);
+
+		addr += sg[j].length;
+		SetPageDirty(sg_page(&sg[j]));
+	}
+
+	dma_unmap_sg(&priv->pdev->dev, sg, nents, DMA_FROM_DEVICE);
+}
+
+#define moca_3450_write moca_3450_write_i2c
+#define moca_3450_read moca_3450_read_i2c
+
+static void moca_put_pages(struct moca_priv_data *priv, int pages)
+{
+	int i;
+
+	for (i = 0; i < pages; i++)
+		page_cache_release(priv->fw_pages[i]);
+}
+
+static int moca_get_pages(struct moca_priv_data *priv, unsigned long addr,
+	int size, unsigned int moca_addr, int write)
+{
+	unsigned int pages, chunk_size;
+	int ret, i;
+
+	if (addr & 3)
+		return -EINVAL;
+	if ((size <= 0) || (size > MAX_FW_SIZE))
+		return -EINVAL;
+
+	pages = ((addr & ~PAGE_MASK) + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	down_read(&current->mm->mmap_sem);
+	ret = get_user_pages(current, current->mm, addr & PAGE_MASK, pages,
+		write, 0, priv->fw_pages, NULL);
+	up_read(&current->mm->mmap_sem);
+
+	if (ret < 0)
+		return ret;
+	BUG_ON((ret > MAX_FW_PAGES) || (pages == 0));
+
+	if (ret < pages) {
+		dev_warn(priv->dev,
+			 "get_user_pages returned %d expecting %d\n",
+			 ret, pages);
+		moca_put_pages(priv, ret);
+		return -EFAULT;
+	}
+
+	chunk_size = PAGE_SIZE - (addr & ~PAGE_MASK);
+	if (size < chunk_size)
+		chunk_size = size;
+
+	sg_set_page(&priv->fw_sg[0], priv->fw_pages[0], chunk_size,
+		addr & ~PAGE_MASK);
+	size -= chunk_size;
+
+	for (i = 1; i < pages; i++) {
+		sg_set_page(&priv->fw_sg[i], priv->fw_pages[i],
+			size > PAGE_SIZE ? PAGE_SIZE : size, 0);
+		size -= PAGE_SIZE;
+	}
+	return ret;
+}
+
+static int moca_write_img(struct moca_priv_data *priv, struct moca_xfer *x)
+{
+	int pages, i, ret = -EINVAL;
+	struct moca_fw_hdr hdr;
+	u32 bl_chunks;
+
+	if (copy_from_user(&hdr, (void __user *)(unsigned long)x->buf,
+			sizeof(hdr)))
+		return -EFAULT;
+
+	bl_chunks = be32_to_cpu(hdr.bl_chunks);
+	if (!bl_chunks || (bl_chunks > MAX_BL_CHUNKS))
+		bl_chunks = 1;
+
+	pages = moca_get_pages(priv, (unsigned long)x->buf, x->len, 0, 0);
+	if (pages < 0)
+		return pages;
+	if (pages < (bl_chunks + 2))
+		goto out;
+
+	/* host must use FW_CHUNK_SIZE MMU pages (for now) */
+	BUG_ON(FW_CHUNK_SIZE != PAGE_SIZE);
+
+	/* write the first two chunks, then start the MIPS */
+	moca_write_sg(priv, 0, &priv->fw_sg[0], bl_chunks + 1);
+	moca_enable_irq(priv);
+	moca_start_mips(priv, be32_to_cpu(hdr.cpuid));
+	ret = 0;
+
+	/* wait for an ACK, then write each successive chunk */
+	for (i = bl_chunks + 1; i < pages; i++) {
+		if (wait_for_completion_timeout(&priv->chunk_complete,
+				1000 * M2M_TIMEOUT_MS) <= 0) {
+			moca_disable_irq(priv);
+			dev_warn(priv->dev, "chunk ack timed out\n");
+			ret = -EIO;
+			goto out;
+		}
+		moca_write_sg(priv, priv->regs->data_mem_offset +
+			      FW_CHUNK_SIZE * bl_chunks,
+			      &priv->fw_sg[i], 1);
+	}
+
+	/* wait for ACK of last block.  Older firmware images didn't
+	   ACK the last block, so don't return an error */
+	wait_for_completion_timeout(&priv->chunk_complete,
+			1000 * M2M_TIMEOUT_MS / 10);
+
+out:
+	moca_put_pages(priv, pages);
+	return ret;
+}
+
+/*
+ * MESSAGE AND LIST HANDLING
+ */
+
+static void moca_handle_lab_printf(struct moca_priv_data *priv,
+	struct moca_core_msg *m)
+{
+	u32 str_len;
+	u32 str_addr;
+
+	if (priv->mmp_20) {
+		str_len = (be32_to_cpu(m->data[4]) + 3) & ~3;
+		str_addr = be32_to_cpu(m->data[3]) & 0x1fffffff;
+
+		if ((be32_to_cpu(m->data[0]) == 0x3) &&
+		    (be32_to_cpu(m->data[1]) == 12) &&
+		    ((be32_to_cpu(m->data[2]) & 0xffffff) == 0x090801) &&
+		    (be32_to_cpu(m->data[4]) <= MAX_LAB_PRINTF)) {
+			m->len = 3 + str_len;
+			moca_read_mem(priv, &m->data[3], str_addr, str_len);
+
+			m->data[1] = cpu_to_be32(m->len - 8);
+		}
+	} else {
+		str_len = (be32_to_cpu(m->data[3]) + 3) & ~3;
+		str_addr = be32_to_cpu(m->data[2]) & 0x1fffffff;
+
+		if ((be32_to_cpu(m->data[0]) & 0xff0000ff) == 0x09000001 &&
+			be32_to_cpu(m->data[1]) == 0x600b0008 &&
+			(be32_to_cpu(m->data[3]) <= MAX_LAB_PRINTF)) {
+
+			m->len = 8 + str_len;
+			moca_read_mem(priv, &m->data[2], str_addr, str_len);
+
+			m->data[1] = cpu_to_be32((MOCA_IE_DRV_PRINTF << 16) +
+				m->len - 8);
+		}
+	}
+}
+static void moca_msg_reset(struct moca_priv_data *priv)
+{
+	int i;
+
+	if (priv->running)
+		moca_disable_irq(priv);
+	priv->running = 0;
+	priv->host_mbx_busy = 0;
+	priv->host_resp_pending = 0;
+	priv->core_req_pending = 0;
+	priv->assert_pending = 0;
+	priv->mbx_offset[0] = -1;
+	priv->mbx_offset[1] = -1;
+
+	spin_lock_bh(&priv->list_lock);
+	INIT_LIST_HEAD(&priv->core_msg_free_list);
+	INIT_LIST_HEAD(&priv->core_msg_pend_list);
+
+	for (i = 0; i < NUM_CORE_MSG; i++)
+		list_add_tail(&priv->core_msg_queue[i].chain,
+			&priv->core_msg_free_list);
+
+	INIT_LIST_HEAD(&priv->host_msg_free_list);
+	INIT_LIST_HEAD(&priv->host_msg_pend_list);
+
+	for (i = 0; i < NUM_HOST_MSG; i++)
+		list_add_tail(&priv->host_msg_queue[i].chain,
+			&priv->host_msg_free_list);
+	spin_unlock_bh(&priv->list_lock);
+}
+
+static struct list_head *moca_detach_head(struct moca_priv_data *priv,
+	struct list_head *h)
+{
+	struct list_head *r = NULL;
+
+	spin_lock_bh(&priv->list_lock);
+	if (!list_empty(h)) {
+		r = h->next;
+		list_del(r);
+	}
+	spin_unlock_bh(&priv->list_lock);
+
+	return r;
+}
+
+static void moca_attach_tail(struct moca_priv_data *priv,
+	struct list_head *elem, struct list_head *list)
+{
+	spin_lock_bh(&priv->list_lock);
+	list_add_tail(elem, list);
+	spin_unlock_bh(&priv->list_lock);
+}
+
+/* Must have dev_mutex when calling this function */
+static int moca_recvmsg(struct moca_priv_data *priv, uintptr_t offset,
+	u32 max_size, uintptr_t reply_offset, u32 cpuid)
+{
+	struct list_head *ml = NULL;
+	struct moca_core_msg *m;
+	unsigned int w, rw, num_ies;
+	u32 data, size;
+	char *msg;
+	int err = -ENOMEM;
+	u32 *reply = priv->core_resp_buf;
+	int attach = 1;
+
+	m = &priv->core_msg_temp;
+
+	/* make sure we have the mailbox offset before using it */
+	moca_get_mbx_offset(priv);
+
+	/* read only as much as is necessary.
+	   The second word is the length for mmp_20 */
+	if (priv->mmp_20) {
+		moca_read_mem(priv, m->data,
+			offset + priv->mbx_offset[cpuid], 8);
+
+		size = (be32_to_cpu(m->data[1])+3) & 0xFFFFFFFC;
+		/* if size is too large, this is a protocol error.
+		   mocad will output the error message */
+		if (size > max_size - 8)
+			size = max_size - 8;
+
+		moca_read_mem(priv, &m->data[2],
+			offset + priv->mbx_offset[cpuid] + 8, size);
+	} else
+		moca_read_mem(priv, m->data,
+			offset + priv->mbx_offset[cpuid], max_size);
+
+	data = be32_to_cpu(m->data[0]);
+
+	if (priv->mmp_20) {
+		/* In MoCA 2.0, there is only 1 IE per message */
+		num_ies = 1;
+	} else {
+		num_ies = data & 0xffff;
+	}
+
+	if (reply_offset) {
+		if (priv->mmp_20) {
+			/* In MoCA 2.0, the ACK is to simply set the
+			   MSB in the incoming message and send it
+			   back */
+			reply[0] = cpu_to_be32(data | 0x80000000);
+			rw = 1;
+		} else {
+			/* ACK + seq number + number of IEs */
+			reply[0] = cpu_to_be32((data & 0x00ff0000) |
+				0x04000000 | num_ies);
+			rw = 1;
+		}
+	}
+
+	err = -EINVAL;
+	w = 1;
+	max_size >>= 2;
+	while (num_ies) {
+		if (w >= max_size) {
+			msg = "dropping long message";
+			goto bad;
+		}
+
+		data = be32_to_cpu(m->data[w++]);
+
+		if (reply_offset && !priv->mmp_20) {
+			/*
+			 * ACK each IE in the original message;
+			 * return code is always 0
+			 */
+			if ((rw << 2) >= priv->core_resp_size)
+				dev_warn(priv->dev,
+					 "Core ack buffer overflowed\n");
+			else {
+				reply[rw] = cpu_to_be32((data & ~0xffff) | 4);
+				rw++;
+				reply[rw] = cpu_to_be32(0);
+				rw++;
+			}
+		}
+		if (data & 3) {
+			msg = "IE is not a multiple of 4 bytes";
+			goto bad;
+		}
+
+		w += ((data & 0xffff) >> 2);
+
+		if (w > max_size) {
+			msg = "dropping long message";
+			goto bad;
+		}
+		num_ies--;
+	}
+	m->len = w << 2;
+
+	/* special case for lab_printf traps */
+	moca_handle_lab_printf(priv, m);
+
+	/*
+	 * Check to see if we can add this new message to the current queue.
+	 * The result will be a single message with multiple IEs.
+	 */
+	if (!priv->mmp_20) {
+		spin_lock_bh(&priv->list_lock);
+		if (!list_empty(&priv->core_msg_pend_list)) {
+			ml = priv->core_msg_pend_list.prev;
+			m = list_entry(ml, struct moca_core_msg, chain);
+
+			if (m->len + priv->core_msg_temp.len > max_size)
+				ml = NULL;
+			else {
+				u32 d0 = be32_to_cpu(
+						priv->core_msg_temp.data[0]);
+
+				/* Only concatenate traps from the core */
+				if (((be32_to_cpu(m->data[0]) & 0xff000000) !=
+					0x09000000) ||
+					((d0 & 0xff000000) != 0x09000000))
+					ml = NULL;
+				else {
+					/*
+					 * We can add the message to the
+					 * previous one. Update the num of IEs,
+					 * update the length and copy the data.
+					 */
+					data = be32_to_cpu(m->data[0]);
+					num_ies = data & 0xffff;
+					num_ies += d0 & 0xffff;
+					data &= 0xffff0000;
+					data |= num_ies;
+					m->data[0] = cpu_to_be32(data);
+
+					/*
+					 * Subtract 4 bytes from length for
+					 * message header
+					 */
+					memcpy(&m->data[m->len >> 2],
+						&priv->core_msg_temp.data[1],
+						priv->core_msg_temp.len - 4);
+					m->len += priv->core_msg_temp.len - 4;
+					attach = 0;
+				}
+			}
+		}
+		spin_unlock_bh(&priv->list_lock);
+	}
+
+	if (ml == NULL) {
+		ml = moca_detach_head(priv, &priv->core_msg_free_list);
+		if (ml == NULL) {
+			msg = "no entries left on core_msg_free_list";
+			err = -ENOMEM;
+			goto bad;
+		}
+		m = list_entry(ml, struct moca_core_msg, chain);
+
+		memcpy(m->data, priv->core_msg_temp.data,
+			priv->core_msg_temp.len);
+		m->len = priv->core_msg_temp.len;
+	}
+
+	if (reply_offset) {
+		if ((cpuid == 1) &&
+			(moca_irq_status(priv, NO_FLUSH_IRQ) & M2H_ASSERT)) {
+			/* do not retry - message is gone forever */
+			err = 0;
+			msg = "core_req overwritten by assertion";
+			goto bad;
+		}
+		if ((cpuid == 0) &&
+			(moca_irq_status(priv, NO_FLUSH_IRQ)
+			& M2H_ASSERT_CPU0)) {
+			/* do not retry - message is gone forever */
+			err = 0;
+			msg = "core_req overwritten by assertion";
+			goto bad;
+		}
+		moca_write_mem(priv, reply_offset + priv->mbx_offset[cpuid],
+			reply, rw << 2);
+		moca_ringbell(priv, priv->regs->h2m_resp_bit[cpuid]);
+	}
+
+	if (attach) {
+		moca_attach_tail(priv, ml, &priv->core_msg_pend_list);
+		wake_up(&priv->core_msg_wq);
+	}
+
+	return 0;
+
+bad:
+	dev_warn(priv->dev, "%s\n", msg);
+
+	if (ml)
+		moca_attach_tail(priv, ml, &priv->core_msg_free_list);
+
+	return err;
+}
+
+static int moca_h2m_sanity_check(struct moca_priv_data *priv,
+	struct moca_host_msg *m)
+{
+	unsigned int w, num_ies;
+	u32 data;
+
+	if (priv->mmp_20) {
+		/* The length is stored in data[1]
+		   plus 8 extra header bytes */
+		data = be32_to_cpu(m->data[1]) + 8;
+		if (data > priv->host_req_size)
+			return -1;
+		else
+			return (int) data;
+	} else {
+		data = be32_to_cpu(m->data[0]);
+		num_ies = data & 0xffff;
+
+		w = 1;
+		while (num_ies) {
+			if (w >= (m->len << 2))
+				return -1;
+
+			data = be32_to_cpu(m->data[w++]);
+
+			if (data & 3)
+				return -1;
+			w += (data & 0xffff) >> 2;
+			num_ies--;
+		}
+		return w << 2;
+	}
+}
+
+/* Must have dev_mutex when calling this function */
+static int moca_sendmsg(struct moca_priv_data *priv, u32 cpuid)
+{
+	struct list_head *ml = NULL;
+	struct moca_host_msg *m;
+
+	if (priv->host_mbx_busy == 1)
+		return -1;
+
+	ml = moca_detach_head(priv, &priv->host_msg_pend_list);
+	if (ml == NULL)
+		return -EAGAIN;
+	m = list_entry(ml, struct moca_host_msg, chain);
+
+	moca_write_mem(priv, priv->mbx_offset[cpuid] + priv->host_req_offset,
+		m->data, m->len);
+
+	moca_ringbell(priv, priv->regs->h2m_req_bit[cpuid]);
+	moca_attach_tail(priv, ml, &priv->host_msg_free_list);
+	wake_up(&priv->host_msg_wq);
+
+	return 0;
+}
+
+/* Must have dev_mutex when calling this function */
+static int moca_wdt(struct moca_priv_data *priv, u32 cpu)
+{
+	struct list_head *ml = NULL;
+	struct moca_core_msg *m;
+
+	ml = moca_detach_head(priv, &priv->core_msg_free_list);
+	if (ml == NULL) {
+		dev_warn(priv->dev, "no entries left on core_msg_free_list\n");
+		return -ENOMEM;
+	}
+
+	if (priv->mmp_20) {
+		/*
+		 * generate phony wdt message to pass to the user
+		 * type = 0x03 (trap)
+		 * IE type = 0x11003 (wdt), 4 bytes length
+		 */
+		m = list_entry(ml, struct moca_core_msg, chain);
+		m->data[0] = cpu_to_be32(0x3);
+		m->data[1] = cpu_to_be32(4);
+		m->data[2] = cpu_to_be32(0x11003);
+		m->len = 12;
+	} else {
+		/*
+		 * generate phony wdt message to pass to the user
+		 * type = 0x09 (trap)
+		 * IE type = 0xff01 (wdt), 4 bytes length
+		 */
+		m = list_entry(ml, struct moca_core_msg, chain);
+		m->data[0] = cpu_to_be32(0x09000001);
+		m->data[1] = cpu_to_be32((MOCA_IE_WDT << 16) | 4);
+		m->data[2] = cpu_to_be32(cpu);
+		m->len = 12;
+	}
+
+	moca_attach_tail(priv, ml, &priv->core_msg_pend_list);
+	wake_up(&priv->core_msg_wq);
+
+	return 0;
+}
+
+void moca_set_pm_state(struct moca_priv_data *priv, enum moca_pm_states state)
+{
+	dev_info(priv->dev, "state %s -> %s\n", moca_state_string[priv->state],
+		 moca_state_string[state]);
+	priv->state = state;
+}
+
+static int  __maybe_unused moca_send_pm_trap(struct moca_priv_data *priv,
+					     enum moca_pm_states state)
+{
+	struct list_head *ml = NULL;
+	struct moca_core_msg *m;
+
+	ml = moca_detach_head(priv, &priv->core_msg_free_list);
+	if (ml == NULL) {
+		dev_warn(priv->dev, "no entries left on core_msg_free_list\n");
+		return -ENOMEM;
+	}
+
+	if (priv->mmp_20) {
+		/*
+		 * generate an IE_PM_NOTIFICATION trap to the user space
+		 */
+		m = list_entry(ml, struct moca_core_msg, chain);
+		m->data[0] = cpu_to_be32(0x3);
+		m->data[1] = cpu_to_be32(8);
+		m->data[2] = cpu_to_be32(0x11014);
+		m->data[3] = cpu_to_be32(state);
+		m->len = 16;
+		moca_attach_tail(priv, ml, &priv->core_msg_pend_list);
+		wake_up(&priv->core_msg_wq);
+	}
+
+	return 0;
+}
+
+static int moca_get_mbx_offset(struct moca_priv_data *priv)
+{
+	const struct moca_regs *r = priv->regs;
+	uintptr_t base;
+
+	if (priv->mbx_offset[1] == -1) {
+		if (moca_is_20(priv))
+			base = MOCA_RD(priv->base +
+				r->moca2host_mmp_inbox_0_offset) &
+				0x1fffffff;
+		else
+			base = MOCA_RD(priv->base + r->gp0_offset) &
+				0x1fffffff;
+
+		if ((base == 0) ||
+			(base >= r->cntl_mem_size + r->cntl_mem_offset) ||
+			(base & 0x07)) {
+			dev_warn(priv->dev,
+				 "can't get mailbox base CPU 1 (%X)\n",
+				 (int)base);
+			return -1;
+		}
+		priv->mbx_offset[1] = base;
+	}
+
+	if ((priv->mbx_offset[0] == -1) && moca_is_20(priv) && priv->mmp_20) {
+		base = MOCA_RD(priv->base +
+			r->moca2host_mmp_inbox_2_offset) &
+			0x1fffffff;
+		if ((base == 0) ||
+			(base >= r->cntl_mem_size + r->cntl_mem_offset) ||
+			(base & 0x07)) {
+			dev_warn(priv->dev,
+				 "can't get mailbox base CPU 0 (%X)\n",
+				 (int)base);
+			return -1;
+		}
+
+		priv->mbx_offset[0] = base;
+	}
+
+	return 0;
+}
+
+/*
+ * INTERRUPT / WORKQUEUE BH
+ */
+
+static void moca_work_handler(struct work_struct *work)
+{
+	struct moca_priv_data *priv =
+		container_of(work, struct moca_priv_data, work);
+	u32 mask = 0;
+	int ret, stopped = 0;
+
+	mutex_lock(&priv->dev_mutex);
+
+	if (priv->enabled) {
+		mask = moca_irq_status(priv, FLUSH_IRQ);
+		if (mask & M2H_DMA) {
+			mask &= ~M2H_DMA;
+			complete(&priv->copy_complete);
+		}
+
+		if (mask & M2H_NEXTCHUNK) {
+			mask &= ~M2H_NEXTCHUNK;
+			complete(&priv->chunk_complete);
+		}
+
+		if (moca_is_20(priv) && mask & M2H_NEXTCHUNK_CPU0) {
+			mask &= ~M2H_NEXTCHUNK_CPU0;
+			complete(&priv->chunk_complete);
+		}
+
+		if (mask == 0) {
+			mutex_unlock(&priv->dev_mutex);
+			moca_enable_irq(priv);
+			return;
+		}
+
+		if (mask & (M2H_REQ | M2H_RESP |
+			M2H_REQ_CPU0 | M2H_RESP_CPU0)) {
+			if (moca_get_mbx_offset(priv)) {
+				/* mbx interrupt but mbx_offset is bogus?? */
+				mutex_unlock(&priv->dev_mutex);
+				moca_enable_irq(priv);
+				return;
+			}
+		}
+	}
+
+	if (!priv->running) {
+		stopped = 1;
+	} else {
+		/* fatal events */
+		if (mask & M2H_ASSERT) {
+			ret = moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 1);
+			if (ret == -ENOMEM)
+				priv->assert_pending = 2;
+		}
+		if (mask & M2H_ASSERT_CPU0) {
+			ret = moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 0);
+			if (ret == -ENOMEM)
+				priv->assert_pending = 1;
+		}
+		/* M2H_WDT_CPU1 is mapped to the only CPU for MoCA11 HW */
+		if (mask & M2H_WDT_CPU1) {
+			ret = moca_wdt(priv, 2);
+			if (ret == -ENOMEM)
+				priv->wdt_pending |= BIT(1);
+			stopped = 1;
+		}
+		if (moca_is_20(priv) && mask & M2H_WDT_CPU0) {
+			ret = moca_wdt(priv, 1);
+			if (ret == -ENOMEM)
+				priv->wdt_pending |= BIT(0);
+			stopped = 1;
+		}
+	}
+	if (stopped) {
+		priv->running = 0;
+		priv->core_req_pending = 0;
+		priv->host_resp_pending = 0;
+		priv->host_mbx_busy = 1;
+		mutex_unlock(&priv->dev_mutex);
+		wake_up(&priv->core_msg_wq);
+		return;
+	}
+
+	/* normal events */
+	if (mask & M2H_REQ) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 1);
+		if (ret == -ENOMEM)
+			priv->core_req_pending = 2;
+	}
+	if (mask & M2H_RESP) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 1);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending = 2;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 1);
+		}
+	}
+
+	if (mask & M2H_REQ_CPU0) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 0);
+		if (ret == -ENOMEM)
+			priv->core_req_pending = 1;
+	}
+	if (mask & M2H_RESP_CPU0) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 0);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending = 1;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 0);
+		}
+	}
+	mutex_unlock(&priv->dev_mutex);
+
+	moca_enable_irq(priv);
+}
+
+static irqreturn_t moca_interrupt(int irq, void *arg)
+{
+	struct moca_priv_data *priv = arg;
+
+	if (1) {
+		u32 mask = moca_irq_status(priv, FLUSH_DMA_ONLY);
+
+		/* need to handle DMA completions ASAP */
+		if (mask & M2H_DMA) {
+			complete(&priv->copy_complete);
+			mask &= ~M2H_DMA;
+		}
+		if (mask & M2H_NEXTCHUNK) {
+			complete(&priv->chunk_complete);
+			mask &= ~M2H_NEXTCHUNK;
+		}
+
+		if (!mask)
+			return IRQ_HANDLED;
+	}
+	moca_disable_irq(priv);
+	schedule_work(&priv->work);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * BCM3450 ACCESS VIA I2C
+ */
+
+static int moca_3450_wait(struct moca_priv_data *priv)
+{
+	struct bsc_regs *bsc = priv->i2c_base;
+	long timeout = HZ / 1000;	/* 1ms */
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait);
+	int i = 0;
+
+	do {
+		if (I2C_RD(&bsc->iic_enable) & 2) {
+			I2C_WR(&bsc->iic_enable, 0);
+			return 0;
+		}
+		if (i++ > 50) {
+			I2C_WR(&bsc->iic_enable, 0);
+			dev_warn(priv->dev, "3450 I2C timed out\n");
+			return -1;
+		}
+		sleep_on_timeout(&wait, timeout ? timeout : 1);
+	} while (1);
+}
+
+static void moca_3450_write_i2c(struct moca_priv_data *priv, u8 addr, u32 data)
+{
+	struct bsc_regs *bsc = priv->i2c_base;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	I2C_WR(&bsc->iic_enable, 0);
+	I2C_WR(&bsc->chip_address, pd->bcm3450_i2c_addr << 1);
+	I2C_WR(&bsc->data_in[0], (addr >> 2) | (data << 8));
+	I2C_WR(&bsc->data_in[1], data >> 24);
+	I2C_WR(&bsc->cnt_reg, (5 << 0) | (0 << 6)); /* 5B out, 0B in */
+	I2C_WR(&bsc->ctl_reg, (1 << 4) | (0 << 0)); /* write only, 390kHz */
+	I2C_WR(&bsc->ctlhi_reg, (1 << 6));          /* 32-bit words */
+	I2C_WR(&bsc->iic_enable, 1);
+
+	moca_3450_wait(priv);
+}
+
+static u32 moca_3450_read_i2c(struct moca_priv_data *priv, u8 addr)
+{
+	struct bsc_regs *bsc = priv->i2c_base;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	I2C_WR(&bsc->iic_enable, 0);
+	I2C_WR(&bsc->chip_address, pd->bcm3450_i2c_addr << 1);
+	I2C_WR(&bsc->data_in[0], (addr >> 2));
+	I2C_WR(&bsc->cnt_reg, (1 << 0) | (4 << 6));   /* 1B out then 4B in */
+	I2C_WR(&bsc->ctl_reg, (1 << 4) | (3 << 0));   /* write/read, 390kHz */
+	I2C_WR(&bsc->ctlhi_reg, (1 << 6));	      /* 32-bit words */
+	I2C_WR(&bsc->iic_enable, 1);
+
+	if (moca_3450_wait(priv) == 0)
+		return I2C_RD(&bsc->data_out[0]);
+	else
+		return 0xffffffff;
+}
+
+#define BCM3450_CHIP_ID		0x00
+#define BCM3450_CHIP_REV	0x04
+#define BCM3450_LNACNTL		0x14
+#define BCM3450_PACNTL		0x18
+#define BCM3450_MISC		0x1c
+
+static int moca_3450_get_reg(struct moca_priv_data *priv, unsigned int  *arg)
+{
+	struct moca_xfer x;
+	u32 *dst;
+	u32 val;
+
+	if (!priv->i2c_base)
+		return -ENODEV;
+
+	if (copy_from_user(&x, (void __user *)arg, sizeof(x)))
+		return -EFAULT;
+
+	dst = (u32 *)(unsigned long)x.buf;
+
+	mutex_lock(&priv->moca_i2c_mutex);
+	val = moca_3450_read(priv, x.moca_addr);
+	mutex_unlock(&priv->moca_i2c_mutex);
+
+	if (put_user(val, dst))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int moca_3450_set_reg(struct moca_priv_data *priv, unsigned int  *arg)
+{
+	struct moca_xfer x;
+	u32 val;
+
+	if (!priv->i2c_base)
+		return -ENODEV;
+
+	if (copy_from_user(&x, (void __user *)arg, sizeof(x)))
+		return -EFAULT;
+
+	mutex_lock(&priv->moca_i2c_mutex);
+
+	if (get_user(val, (u32 *)(unsigned long)x.buf))
+		return -EFAULT;
+
+	moca_3450_write(priv, x.moca_addr, val);
+	mutex_unlock(&priv->moca_i2c_mutex);
+
+	return 0;
+}
+
+static void moca_3450_init(struct moca_priv_data *priv, int action)
+{
+	u32 data;
+
+	/* some platforms connect the i2c directly to the MoCA core */
+	if (!priv->i2c_base)
+		return;
+
+	mutex_lock(&priv->moca_i2c_mutex);
+
+	if (action == MOCA_ENABLE) {
+		/* reset the 3450's I2C block */
+		moca_3450_write(priv, BCM3450_MISC,
+			moca_3450_read(priv, BCM3450_MISC) | 1);
+
+		/* verify chip ID */
+		data = moca_3450_read(priv, BCM3450_CHIP_ID);
+		if (data != 0x3450)
+			dev_warn(priv->dev, "invalid 3450 chip ID 0x%08x\n",
+				 data);
+
+		/* reset the 3450's deserializer */
+		data = moca_3450_read(priv, BCM3450_MISC);
+		data &= ~0x8000; /* power on PA/LNA */
+		moca_3450_write(priv, BCM3450_MISC, data | 2);
+		moca_3450_write(priv, BCM3450_MISC, data & ~2);
+
+		/* set new PA gain */
+		data = moca_3450_read(priv, BCM3450_PACNTL);
+
+		moca_3450_write(priv, BCM3450_PACNTL, (data & ~0x02007ffd) |
+			(0x09 << 11) |		/* RDEG */
+			(0x38 << 5) |		/* CURR_CONT */
+			(0x05 << 2));		/* CURR_FOLLOWER */
+
+		/* Set LNACNTRL to default value */
+		moca_3450_write(priv, BCM3450_LNACNTL, 0x4924);
+
+	} else {
+		/* power down the PA/LNA */
+		data = moca_3450_read(priv, BCM3450_MISC);
+		moca_3450_write(priv, BCM3450_MISC, data | 0x8000);
+
+		data = moca_3450_read(priv, BCM3450_PACNTL);
+		moca_3450_write(priv, BCM3450_PACNTL, data |
+			BIT(0) |	/* PA_PWRDWN */
+			BIT(25));	/* PA_SELECT_PWRUP_BSC */
+
+		data = moca_3450_read(priv, BCM3450_LNACNTL);
+		/* LNA_INBIAS=0, LNA_PWRUP_IIC=0: */
+		data &= ~((7<<12) | (1<<28));
+		/* LNA_SELECT_PWRUP_IIC=1: */
+		moca_3450_write(priv, BCM3450_LNACNTL, data | (1<<29));
+
+	}
+	mutex_unlock(&priv->moca_i2c_mutex);
+}
+
+/*
+ * FILE OPERATIONS
+ */
+
+static int moca_file_open(struct inode *inode, struct file *file)
+{
+	unsigned int minor = iminor(inode);
+	struct moca_priv_data *priv;
+
+	if ((minor >= NUM_MINORS) || minor_tbl[minor] == NULL)
+		return -ENODEV;
+
+	file->private_data = priv = minor_tbl[minor];
+
+	mutex_lock(&priv->dev_mutex);
+	priv->refcount++;
+	mutex_unlock(&priv->dev_mutex);
+	return 0;
+}
+
+static int moca_file_release(struct inode *inode, struct file *file)
+{
+	struct moca_priv_data *priv = file->private_data;
+
+	mutex_lock(&priv->dev_mutex);
+	priv->refcount--;
+	if (priv->refcount == 0 && priv->running == 1) {
+		/* last user closed the device */
+		moca_msg_reset(priv);
+		moca_hw_init(priv, MOCA_DISABLE);
+	}
+	mutex_unlock(&priv->dev_mutex);
+	return 0;
+}
+
+static int moca_ioctl_readmem(struct moca_priv_data *priv,
+	unsigned long xfer_uaddr)
+{
+	struct moca_xfer x;
+	uintptr_t i, src;
+	u32 *dst;
+
+	if (copy_from_user(&x, (void __user *)xfer_uaddr, sizeof(x)))
+		return -EFAULT;
+
+	if (moca_range_ok(priv, x.moca_addr, x.len) < 0)
+		return -EINVAL;
+
+	src = (uintptr_t)priv->base + x.moca_addr;
+	dst = (void *)(unsigned long)x.buf;
+
+	for (i = 0; i < x.len; i += 4, src += 4, dst++)
+		if (put_user(cpu_to_be32(MOCA_RD(src)), dst))
+			return -EFAULT;
+
+	return 0;
+}
+
+static int moca_ioctl_writemem(struct moca_priv_data *priv,
+	unsigned long xfer_uaddr)
+{
+	struct moca_xfer x;
+	uintptr_t i, dst;
+	u32 *src;
+
+	if (copy_from_user(&x, (void __user *)xfer_uaddr, sizeof(x)))
+		return -EFAULT;
+
+	if (moca_range_ok(priv, x.moca_addr, x.len) < 0)
+		return -EINVAL;
+
+	dst = (uintptr_t)priv->base + x.moca_addr;
+	src = (void *)(unsigned long)x.buf;
+
+	for (i = 0; i < x.len; i += 4, src++, dst += 4) {
+		unsigned int val;
+
+		if (get_user(val, src))
+			return -EFAULT;
+
+		MOCA_WR(dst, cpu_to_be32(val));
+	}
+
+	return 0;
+}
+
+/* legacy ioctl - DEPRECATED */
+static int moca_ioctl_get_drv_info_v2(struct moca_priv_data *priv,
+	unsigned long arg)
+{
+	struct moca_kdrv_info_v2 info;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	memset(&info, 0, sizeof(info));
+	info.version = DRV_VERSION;
+	info.build_number = DRV_BUILD_NUMBER;
+	info.builtin_fw = !!bmoca_fw_image;
+
+	info.uptime = (jiffies - priv->start_time) / HZ;
+	info.refcount = priv->refcount;
+	if (moca_is_20(priv))
+		info.gp1 = priv->running ? MOCA_RD(priv->base +
+			priv->regs->moca2host_mmp_inbox_1_offset) : 0;
+	else
+		info.gp1 = priv->running ?
+			MOCA_RD(priv->base + priv->regs->gp1_offset) : 0;
+
+	memcpy(info.enet_name, pd->enet_name, MOCA_IFNAMSIZ);
+
+	info.enet_id = -1;
+	info.macaddr_hi = pd->macaddr_hi;
+	info.macaddr_lo = pd->macaddr_lo;
+	info.hw_rev = pd->chip_id;
+	info.rf_band = pd->rf_band;
+
+	if (copy_to_user((void *)arg, &info, sizeof(info)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int moca_ioctl_get_drv_info(struct moca_priv_data *priv,
+	unsigned long arg)
+{
+	struct moca_kdrv_info info;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	memset(&info, 0, sizeof(info));
+	info.version = DRV_VERSION;
+	info.build_number = DRV_BUILD_NUMBER;
+	info.builtin_fw = !!bmoca_fw_image;
+
+	info.uptime = (jiffies - priv->start_time) / HZ;
+	info.refcount = priv->refcount;
+	if (moca_is_20(priv))
+		info.gp1 = priv->running ? MOCA_RD(priv->base +
+			priv->regs->moca2host_mmp_inbox_1_offset) : 0;
+	else
+		info.gp1 = priv->running ?
+			MOCA_RD(priv->base + priv->regs->gp1_offset) : 0;
+
+	info.macaddr_hi = pd->macaddr_hi;
+	info.macaddr_lo = pd->macaddr_lo;
+	info.chip_id = pd->chip_id;
+	info.hw_rev = pd->hw_rev;
+	info.rf_band = pd->rf_band;
+	info.phy_freq = priv->phy_freq;
+
+	if (priv->enet_pdev && get_device(&priv->enet_pdev->dev)) {
+		struct net_device *enet_dev;
+		rcu_read_lock();
+		enet_dev = platform_get_drvdata(priv->enet_pdev);
+		if (enet_dev) {
+			dev_hold(enet_dev);
+			strlcpy(info.enet_name, enet_dev->name, IFNAMSIZ);
+			dev_put(enet_dev);
+		}
+		rcu_read_unlock();
+		put_device(&priv->enet_pdev->dev);
+		info.enet_id = MOCA_IFNAME_USE_ID;
+	} else {
+		strlcpy(info.enet_name, pd->enet_name, IFNAMSIZ);
+		info.enet_id = pd->enet_id;
+	}
+
+	if (copy_to_user((void *)arg, &info, sizeof(info)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int moca_ioctl_check_for_data(struct moca_priv_data *priv,
+	unsigned long arg)
+{
+	int data_avail = 0;
+	int ret;
+	u32 mask;
+
+	moca_disable_irq(priv);
+
+	moca_get_mbx_offset(priv);
+
+	/* If an IRQ is pending, process it here rather than waiting for it,
+	   so that the results are ready on return. Clear only the interrupts
+	   we are processing here */
+	mask = moca_irq_status(priv, FLUSH_REQRESP_ONLY);
+
+	if (mask & M2H_REQ) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 1);
+		if (ret == -ENOMEM)
+			priv->core_req_pending = 2;
+	}
+	if (mask & M2H_RESP) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 1);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending = 2;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 1);
+		}
+	}
+
+	if (mask & M2H_REQ_CPU0) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 0);
+		if (ret == -ENOMEM)
+			priv->core_req_pending = 1;
+	}
+	if (mask & M2H_RESP_CPU0) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 0);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending = 1;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 0);
+		}
+	}
+
+	moca_enable_irq(priv);
+
+	spin_lock_bh(&priv->list_lock);
+	data_avail = !list_empty(&priv->core_msg_pend_list);
+	spin_unlock_bh(&priv->list_lock);
+
+	if (copy_to_user((void *)arg, &data_avail, sizeof(data_avail)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long moca_file_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	struct moca_priv_data *priv = file->private_data;
+	struct moca_start start;
+	long ret = -ENOTTY;
+
+	mutex_lock(&priv->dev_mutex);
+
+	switch (cmd) {
+	case MOCA_IOCTL_START:
+		/* FIXME: clk_set_rate fails on some platforms, so its
+		   return value is intentionally ignored */
+		moca_clk_set_rate(priv->phy_clk, DEFAULT_PHY_CLOCK);
+		ret = 0;
+
+		if (copy_from_user(&start, (void __user *)arg, sizeof(start)))
+			ret = -EFAULT;
+
+		if (ret >= 0) {
+			priv->bonded_mode =
+				(start.boot_flags & MOCA_BOOT_FLAGS_BONDED);
+			if (!priv->enabled) {
+				moca_msg_reset(priv);
+				moca_hw_init(priv, MOCA_ENABLE);
+				moca_3450_init(priv, MOCA_ENABLE);
+				moca_irq_status(priv, FLUSH_IRQ);
+				moca_mmp_init(priv, 0);
+			}
+
+			ret = moca_write_img(priv, &start.x);
+			if (ret >= 0)
+				priv->running = 1;
+		}
+		break;
+#ifdef CONFIG_PM
+	case MOCA_IOCTL_PM_SUSPEND:
+	case MOCA_IOCTL_PM_WOL:
+		if (priv->state != MOCA_SUSPENDING_WAITING_ACK)
+			dev_warn(priv->dev, "state is %s, expected %s\n",
+				 moca_state_string[priv->state],
+				 moca_state_string[MOCA_SUSPENDING_WAITING_ACK]);
+		else {
+			complete(&priv->suspend_complete);
+			moca_set_pm_state(priv, MOCA_SUSPENDING_GOT_ACK);
+		}
+		ret = 0;
+		break;
+#endif
+	case MOCA_IOCTL_STOP:
+		moca_msg_reset(priv);
+		moca_3450_init(priv, MOCA_DISABLE);
+		moca_hw_init(priv, MOCA_DISABLE);
+		ret = 0;
+		break;
+	case MOCA_IOCTL_READMEM:
+		if (priv->running)
+			ret = moca_ioctl_readmem(priv, arg);
+		break;
+	case MOCA_IOCTL_WRITEMEM:
+		if (priv->running)
+			ret = moca_ioctl_writemem(priv, arg);
+		break;
+	case MOCA_IOCTL_GET_DRV_INFO_V2:
+		ret = moca_ioctl_get_drv_info_v2(priv, arg);
+		break;
+	case MOCA_IOCTL_GET_DRV_INFO:
+		ret = moca_ioctl_get_drv_info(priv, arg);
+		break;
+	case MOCA_IOCTL_CHECK_FOR_DATA:
+		if (priv->running)
+			ret = moca_ioctl_check_for_data(priv, arg);
+		else
+			ret = -EIO;
+		break;
+	case MOCA_IOCTL_WOL:
+		priv->wol_enabled = (int)arg;
+		dev_info(priv->dev, "WOL is %s\n",
+			priv->wol_enabled ? "enabled" : "disabled");
+		ret = 0;
+		break;
+	case MOCA_IOCTL_SET_CPU_RATE:
+		if (!priv->cpu_clk)
+			ret = -EIO;
+		else
+			ret = moca_clk_set_rate(priv->cpu_clk,
+						     (unsigned int)arg);
+		break;
+	case MOCA_IOCTL_SET_PHY_RATE:
+		if (!priv->phy_clk)
+			ret = -EIO;
+		else
+			ret = moca_clk_set_rate(priv->phy_clk,
+						     (unsigned int)arg);
+		break;
+	case MOCA_IOCTL_GET_3450_REG:
+		ret = moca_3450_get_reg(priv, (unsigned int *)arg);
+		break;
+	case MOCA_IOCTL_SET_3450_REG:
+		ret = moca_3450_set_reg(priv, (unsigned int *)arg);
+		break;
+
+	}
+	mutex_unlock(&priv->dev_mutex);
+
+	return ret;
+}
+
+static ssize_t moca_file_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct moca_priv_data *priv = file->private_data;
+	DECLARE_WAITQUEUE(wait, current);
+	struct list_head *ml = NULL;
+	struct moca_core_msg *m = NULL;
+	ssize_t ret;
+	int empty_free_list = 0;
+
+	if (count < priv->core_req_size)
+		return -EINVAL;
+
+	add_wait_queue(&priv->core_msg_wq, &wait);
+	do {
+		__set_current_state(TASK_INTERRUPTIBLE);
+
+		ml = moca_detach_head(priv, &priv->core_msg_pend_list);
+		if (ml != NULL) {
+			m = list_entry(ml, struct moca_core_msg, chain);
+			ret = 0;
+			break;
+		}
+		if (file->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
+			break;
+		}
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		schedule();
+	} while (1);
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&priv->core_msg_wq, &wait);
+
+	if (ret < 0)
+		return ret;
+
+	if (copy_to_user(buf, m->data, m->len))
+		ret = -EFAULT;	/* beware: message will be dropped */
+	else
+		ret = m->len;
+
+	spin_lock_bh(&priv->list_lock);
+	if (list_empty(&priv->core_msg_free_list))
+		empty_free_list = 1;
+	list_add_tail(ml, &priv->core_msg_free_list);
+	spin_unlock_bh(&priv->list_lock);
+
+	if (empty_free_list) {
+		/*
+		 * we just freed up space for another message, so if there was
+		 * a backlog, clear it out
+		 */
+		mutex_lock(&priv->dev_mutex);
+
+		if (moca_get_mbx_offset(priv)) {
+			mutex_unlock(&priv->dev_mutex);
+			return -EIO;
+		}
+
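+		/*
+		 * The *_pending fields are bitmasks: bit 0 (value 1) tracks
+		 * the MoCA CPU0 mailbox, bit 1 (value 2) the CPU1 mailbox
+		 * (see moca_ioctl_check_for_data).
+		 */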
+		if (priv->assert_pending & 2) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 1) != -ENOMEM)
+				priv->assert_pending &= ~2;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg assert failed\n");
+		}
+		if (priv->assert_pending & 1) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 0) != -ENOMEM)
+				priv->assert_pending &= ~1;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg assert failed\n");
+		}
+		if (priv->wdt_pending)
+			if (moca_wdt(priv, priv->wdt_pending) != -ENOMEM)
+				priv->wdt_pending = 0;
+
+		if (priv->core_req_pending & 1) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, priv->core_resp_offset, 0)
+				!= -ENOMEM)
+				priv->core_req_pending &= ~1;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg core_req failed\n");
+		}
+		if (priv->core_req_pending & 2) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, priv->core_resp_offset, 1)
+				!= -ENOMEM)
+				priv->core_req_pending &= ~2;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg core_req failed\n");
+		}
+		if (priv->host_resp_pending & 1) {
+			if (moca_recvmsg(priv, priv->host_resp_offset,
+				priv->host_resp_size, 0, 0) != -ENOMEM)
+				priv->host_resp_pending &= ~1;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg host_resp failed\n");
+		}
+		if (priv->host_resp_pending & 2) {
+			if (moca_recvmsg(priv, priv->host_resp_offset,
+				priv->host_resp_size, 0, 1) != -ENOMEM)
+				priv->host_resp_pending &= ~2;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg host_resp failed\n");
+		}
+		mutex_unlock(&priv->dev_mutex);
+	}
+
+	return ret;
+}
+
+static ssize_t moca_file_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct moca_priv_data *priv = file->private_data;
+	DECLARE_WAITQUEUE(wait, current);
+	struct list_head *ml = NULL;
+	struct moca_host_msg *m = NULL;
+	ssize_t ret;
+	u32 cpuid;
+
+	if (count > priv->host_req_size)
+		return -EINVAL;
+
+	add_wait_queue(&priv->host_msg_wq, &wait);
+	do {
+		__set_current_state(TASK_INTERRUPTIBLE);
+
+		ml = moca_detach_head(priv, &priv->host_msg_free_list);
+		if (ml != NULL) {
+			m = list_entry(ml, struct moca_host_msg, chain);
+			ret = 0;
+			break;
+		}
+		if (file->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
+			break;
+		}
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		schedule();
+	} while (1);
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&priv->host_msg_wq, &wait);
+
+	if (ret < 0)
+		return ret;
+
+	m->len = count;
+
+	if (copy_from_user(m->data, buf, m->len)) {
+		ret = -EFAULT;
+		goto bad;
+	}
+
+	ret = moca_h2m_sanity_check(priv, m);
+	if (ret < 0) {
+		ret = -EINVAL;
+		goto bad;
+	}
+
+	moca_attach_tail(priv, ml, &priv->host_msg_pend_list);
+
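+	/*
+	 * Pick the target mailbox: without MMP 2.0 everything goes to CPU1;
+	 * with MMP 2.0, bit 4 of the first message word selects CPU0.
+	 */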
+	if (!priv->mmp_20)
+		cpuid = 1;
+	else {
+		if (cpu_to_be32(m->data[0]) & 0x10)
+			cpuid = 0;
+		else
+			cpuid = 1;
+	}
+	mutex_lock(&priv->dev_mutex);
+	if (priv->running) {
+		if (moca_get_mbx_offset(priv))
+			ret = -EIO;
+		else
+			moca_sendmsg(priv, cpuid);
+	} else
+		ret = -EIO;
+	mutex_unlock(&priv->dev_mutex);
+
+	return ret;
+
+bad:
+	moca_attach_tail(priv, ml, &priv->host_msg_free_list);
+
+	return ret;
+}
+
+static unsigned int moca_file_poll(struct file *file, poll_table *wait)
+{
+	struct moca_priv_data *priv = file->private_data;
+	unsigned int ret = 0;
+
+	poll_wait(file, &priv->core_msg_wq, wait);
+	poll_wait(file, &priv->host_msg_wq, wait);
+
+	spin_lock_bh(&priv->list_lock);
+	if (!list_empty(&priv->core_msg_pend_list))
+		ret |= POLLIN | POLLRDNORM;
+	if (!list_empty(&priv->host_msg_free_list))
+		ret |= POLLOUT | POLLWRNORM;
+	spin_unlock_bh(&priv->list_lock);
+
+	return ret;
+}
+
+static const struct file_operations moca_fops = {
+	.owner =		THIS_MODULE,
+	.open =			moca_file_open,
+	.release =		moca_file_release,
+	.unlocked_ioctl =	moca_file_ioctl,
+	.read =			moca_file_read,
+	.write =		moca_file_write,
+	.poll =			moca_file_poll,
+};
+
+/*
+ * PLATFORM DRIVER
+ */
+#ifdef CONFIG_OF
+static int moca_parse_dt_node(struct moca_priv_data *priv)
+{
+	struct platform_device *pdev = priv->pdev;
+	struct moca_platform_data pd;
+	struct device_node *of_node = pdev->dev.of_node, *enet_node;
+	phandle enet_ph;
+	int status = 0, i = 0;
+	const u8 *macaddr;
+	const char *rfb;
+	const char *const of_rfb[MOCA_BAND_MAX + 1] = MOCA_BAND_NAMES;
+
+	memset(&pd, 0, sizeof(pd));
+
+	/* mandatory entries */
+	status = of_property_read_u32(of_node, "hw-rev", &pd.hw_rev);
+	if (status)
+		goto err;
+
+	status = of_property_read_u32(of_node, "enet-id", &enet_ph);
+	if (status)
+		goto err;
+	enet_node = of_find_node_by_phandle(enet_ph);
+	priv->enet_pdev = of_find_device_by_node(enet_node);
+	of_node_put(enet_node);
+	if (!priv->enet_pdev) {
+		dev_err(&pdev->dev,
+			"can't find associated network interface\n");
+		return -EINVAL;
+	}
+
+	macaddr = of_get_mac_address(of_node);
+	if (!macaddr) {
+		dev_err(&pdev->dev, "can't find MAC address\n");
+		return -EINVAL;
+	}
+
+	mac_to_u32(&pd.macaddr_hi, &pd.macaddr_lo, macaddr);
+
+	/* defaults for optional entries.  All other defaults are 0 */
+	pd.use_dma = 1;
+
+	status = of_property_read_string(of_node, "rf-band", &rfb);
+	if (!status) {
+		for (i = 0; i < MOCA_BAND_MAX; i++) {
+			if (strcmp(rfb, of_rfb[i]) == 0) {
+				pd.rf_band = i;
+				dev_info(&pdev->dev, "using %s(%d) band\n",
+					 of_rfb[i], i);
+				break;
+			}
+		}
+	}
+
+	if (status || i == MOCA_BAND_MAX) {
+		dev_warn(&pdev->dev, "Defaulting to rf-band %s\n", of_rfb[0]);
+		pd.rf_band = 0;
+	}
+
+	/* optional entries */
+	of_property_read_u32(of_node, "i2c-base", &pd.bcm3450_i2c_base);
+	of_property_read_u32(of_node, "i2c-addr", &pd.bcm3450_i2c_addr);
+	of_property_read_u32(of_node, "use-dma", &pd.use_dma);
+	of_property_read_u32(of_node, "use-spi", &pd.use_spi);
+	status = of_property_read_u32(of_node, "chip-id", &pd.chip_id);
+	if (status)
+		pd.chip_id = (BRCM_CHIP_ID() << 16) | (BRCM_CHIP_REV() + 0xa0);
+
+	status = platform_device_add_data(pdev, &pd, sizeof(pd));
+err:
+	return status;
+
+}
+
+static const struct of_device_id bmoca_instance_match[] = {
+	{ .compatible = "brcm,bmoca-instance" },
+	{},
+};
+
+MODULE_DEVICE_TABLE(bmoca, bmoca_instance_match);
+#endif
+
+#ifdef CONFIG_PM
+void moca_prepare_suspend(struct moca_priv_data *priv)
+{
+	int rc;
+#define MOCA_SUSPEND_TIMEOUT (3 * HZ)
+	mutex_lock(&priv->dev_mutex);
+	switch (priv->state) {
+	case MOCA_ACTIVE:
+		/*
+		 * MoCA is active and online. Set the state to MOCA_SUSPENDING
+		 * and notify the user space daemon to go into hostless mode
+		 */
+		rc = moca_send_pm_trap(priv, MOCA_SUSPENDING);
+		if (rc != 0) {
+			/* we did not send the trap successfully */
+			moca_set_pm_state(priv, MOCA_SUSPENDING);
+			mutex_unlock(&priv->dev_mutex);
+			return;
+		}
+		/* wait for the ACK from mocad */
+		moca_set_pm_state(priv, MOCA_SUSPENDING_WAITING_ACK);
+		mutex_unlock(&priv->dev_mutex);
+		rc = wait_for_completion_timeout(&priv->suspend_complete,
+						 MOCA_SUSPEND_TIMEOUT);
+		/* if the ACK never arrives, just log the timeout */
+		if (!rc)
+			dev_err(priv->dev, "suspend timeout\n");
+		break;
+	default:
+		dev_warn(priv->dev, "device not in MOCA_ACTIVE state\n");
+		mutex_unlock(&priv->dev_mutex);
+		break;
+	}
+}
+
+void moca_complete_resume(struct moca_priv_data *priv)
+{
+	int rc;
+	mutex_lock(&priv->dev_mutex);
+	if (priv->state != MOCA_RESUMING) {
+		dev_warn(priv->dev, "state %s should be %s\n",
+			 moca_state_string[priv->state],
+			 moca_state_string[MOCA_RESUMING]);
+		mutex_unlock(&priv->dev_mutex);
+		return;
+	}
+
+	/* Send a trap to moca firmware so that mocad resumes in host mode */
+	rc = moca_send_pm_trap(priv, MOCA_ACTIVE);
+	if (rc != 0) {
+		dev_warn(priv->dev, "could not send MOCA_ACTIVE trap\n");
+	}
+
+	moca_set_pm_state(priv, MOCA_ACTIVE);
+
+	mutex_unlock(&priv->dev_mutex);
+}
+
+static int moca_pm_notifier(struct notifier_block *notifier,
+			     unsigned long pm_event,
+			     void *unused)
+{
+	struct moca_priv_data *priv = container_of(notifier,
+						   struct moca_priv_data,
+						   pm_notifier);
+
+	dev_info(priv->dev, "%s for state %lu\n", __func__, pm_event);
+	switch (pm_event) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		moca_prepare_suspend(priv);
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+	case PM_POST_RESTORE:
+		moca_complete_resume(priv);
+		break;
+	case PM_RESTORE_PREPARE:
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static int moca_register_pm_notifier(struct moca_priv_data *priv)
+{
+	priv->pm_notifier.notifier_call = moca_pm_notifier;
+	return register_pm_notifier(&priv->pm_notifier);
+}
+
+static int moca_unregister_pm_notifier(struct moca_priv_data *priv)
+{
+	return unregister_pm_notifier(&priv->pm_notifier);
+}
+#endif
+
+static int moca_probe(struct platform_device *pdev)
+{
+	struct moca_priv_data *priv;
+	struct resource *mres, *ires;
+	int minor, err;
+	struct moca_platform_data *pd;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "out of memory\n");
+		return -ENOMEM;
+	}
+	dev_set_drvdata(&pdev->dev, priv);
+	priv->pdev = pdev;
+	priv->start_time = jiffies;
+
+#if defined(CONFIG_OF)
+	err = moca_parse_dt_node(priv);
+	if (err)
+		goto bad;
+#endif
+	priv->clk = moca_clk_get(&pdev->dev, "moca");
+	priv->cpu_clk = moca_clk_get(&pdev->dev, "moca-cpu");
+	priv->phy_clk = moca_clk_get(&pdev->dev, "moca-phy");
+
+	pd = pdev->dev.platform_data;
+	priv->hw_rev = pd->hw_rev;
+
+	if (pd->hw_rev == HWREV_MOCA_11_PLUS)
+		priv->regs = &regs_11_plus;
+	else if (pd->hw_rev == HWREV_MOCA_11_LITE)
+		priv->regs = &regs_11_lite;
+	else if (pd->hw_rev == HWREV_MOCA_11)
+		priv->regs = &regs_11;
+	else if ((pd->hw_rev == HWREV_MOCA_20_ALT) ||
+		(pd->hw_rev == HWREV_MOCA_20_GEN21) ||
+		(pd->hw_rev == HWREV_MOCA_20_GEN22) ||
+		(pd->hw_rev == HWREV_MOCA_20_GEN23))
+		priv->regs = &regs_20;
+	else {
+		dev_err(&pdev->dev, "unsupported MoCA HWREV: %x\n",
+			pd->hw_rev);
+		err = -EINVAL;
+		goto bad;
+	}
+
+	init_waitqueue_head(&priv->host_msg_wq);
+	init_waitqueue_head(&priv->core_msg_wq);
+	init_completion(&priv->copy_complete);
+	init_completion(&priv->chunk_complete);
+	init_completion(&priv->suspend_complete);
+
+	spin_lock_init(&priv->list_lock);
+	spin_lock_init(&priv->clock_lock);
+	spin_lock_init(&priv->irq_status_lock);
+
+	mutex_init(&priv->dev_mutex);
+	mutex_init(&priv->copy_mutex);
+	mutex_init(&priv->moca_i2c_mutex);
+
+	sg_init_table(priv->fw_sg, MAX_FW_PAGES);
+
+	INIT_WORK(&priv->work, moca_work_handler);
+
+	priv->minor = -1;
+	for (minor = 0; minor < NUM_MINORS; minor++) {
+		if (minor_tbl[minor] == NULL) {
+			priv->minor = minor;
+			break;
+		}
+	}
+
+	if (priv->minor == -1) {
+		dev_err(&pdev->dev, "can't allocate minor device\n");
+		err = -EIO;
+		goto bad;
+	}
+
+	mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!mres || !ires) {
+		dev_err(&pdev->dev, "can't get resources\n");
+		err = -EIO;
+		goto bad;
+	}
+	priv->base = ioremap(mres->start, mres->end - mres->start + 1);
+	priv->irq = ires->start;
+
+	if (pd->bcm3450_i2c_base)
+		priv->i2c_base = ioremap(pd->bcm3450_i2c_base,
+			sizeof(struct bsc_regs));
+
+	/* leave core in reset until we get an ioctl */
+
+	moca_hw_reset(priv);
+
+	if (request_irq(priv->irq, moca_interrupt, 0, "moca",
+			priv) < 0) {
+		dev_err(&pdev->dev, "can't request interrupt\n");
+		err = -EIO;
+		goto bad2;
+	}
+	moca_hw_init(priv, MOCA_ENABLE);
+	moca_disable_irq(priv);
+	moca_msg_reset(priv);
+	moca_hw_init(priv, MOCA_DISABLE);
+
+	dev_info(&pdev->dev,
+		 "adding minor #%d@0x%08llx, IRQ %d, I2C 0x%08llx/0x%02x\n",
+		 priv->minor, (unsigned long long)mres->start, ires->start,
+		 (unsigned long long)pd->bcm3450_i2c_base,
+		 pd->bcm3450_i2c_addr);
+
+	minor_tbl[priv->minor] = priv;
+	priv->dev = device_create(moca_class, NULL,
+		MKDEV(MOCA_MAJOR, priv->minor), NULL, "bmoca%d", priv->minor);
+	if (IS_ERR(priv->dev)) {
+		dev_warn(&pdev->dev, "can't register class device\n");
+		priv->dev = NULL;
+	}
+
+#ifdef CONFIG_PM
+	err = moca_register_pm_notifier(priv);
+	if (err) {
+		dev_err(&pdev->dev, "register_pm_notifier failed err %d\n",
+			err);
+		goto bad2;
+	}
+#endif
+
+	return 0;
+
+bad2:
+	if (priv->base)
+		iounmap(priv->base);
+	if (priv->i2c_base)
+		iounmap(priv->i2c_base);
+bad:
+	kfree(priv);
+	return err;
+}
+
+static int moca_remove(struct platform_device *pdev)
+{
+	struct moca_priv_data *priv = dev_get_drvdata(&pdev->dev);
+	struct clk *clk = priv->clk;
+	struct clk *phy_clk = priv->phy_clk;
+	struct clk *cpu_clk = priv->cpu_clk;
+	int err = 0;
+
+	if (priv->dev)
+		device_destroy(moca_class, MKDEV(MOCA_MAJOR, priv->minor));
+	minor_tbl[priv->minor] = NULL;
+
+	free_irq(priv->irq, priv);
+	if (priv->i2c_base)
+		iounmap(priv->i2c_base);
+	if (priv->base)
+		iounmap(priv->base);
+	moca_clk_put(cpu_clk);
+	moca_clk_put(phy_clk);
+	moca_clk_put(clk);
+
+#ifdef CONFIG_PM
+	err = moca_unregister_pm_notifier(priv);
+	if (err) {
+		dev_err(&pdev->dev, "unregister_pm_notifier failed err %d\n",
+			err);
+	}
+#endif
+
+	/* free last: priv embeds the PM notifier block */
+	kfree(priv);
+
+	return err;
+}
+
+#ifdef CONFIG_PM
+static int moca_suspend(struct device *dev)
+{
+	int minor;
+
+	for (minor = 0; minor < NUM_MINORS; minor++) {
+		struct moca_priv_data *priv = minor_tbl[minor];
+		if (priv && priv->enabled) {
+
+			mutex_lock(&priv->dev_mutex);
+			switch (priv->state) {
+			case MOCA_SUSPENDING_GOT_ACK:
+				moca_set_pm_state(priv, MOCA_SUSPENDED);
+				break;
+
+			case MOCA_SUSPENDING:
+			case MOCA_SUSPENDING_WAITING_ACK:
+			default:
+				dev_warn(priv->dev, "state %s should be %s\n",
+					 moca_state_string[priv->state],
+					 moca_state_string[MOCA_SUSPENDING_GOT_ACK]);
+			}
+			mutex_unlock(&priv->dev_mutex);
+		}
+	}
+	return 0;
+}
+
+static int moca_resume(struct device *dev)
+{
+	int minor;
+
+	for (minor = 0; minor < NUM_MINORS; minor++) {
+		struct moca_priv_data *priv = minor_tbl[minor];
+		if (priv && priv->enabled) {
+			mutex_lock(&priv->dev_mutex);
+			if (priv->state != MOCA_SUSPENDED) {
+				dev_warn(priv->dev, "state %s should be %s\n",
+					 moca_state_string[priv->state],
+					 moca_state_string[MOCA_SUSPENDED]);
+
+				if (MOCA_RD(priv->base +
+					    priv->regs->sw_reset_offset)
+				    & RESET_MOCA_SYS) {
+					/*
+					 * If we lost power to the block
+					 * (e.g. unclean S3 transition), but the
+					 * driver still thinks the core is
+					 * enabled, try to get things back in sync.
+					 */
+					priv->enabled = 0;
+					moca_msg_reset(priv);
+				}
+			}
+			moca_set_pm_state(priv, MOCA_RESUMING);
+			mutex_unlock(&priv->dev_mutex);
+		}
+	}
+	return 0;
+}
+
+static const struct dev_pm_ops moca_pm_ops = {
+	.suspend		= moca_suspend,
+	.resume			= moca_resume,
+};
+
+#endif
+
+static struct platform_driver moca_plat_drv = {
+	.probe =		moca_probe,
+	.remove =		moca_remove,
+	.driver = {
+		.name =		"bmoca",
+		.owner =	THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm =		&moca_pm_ops,
+#endif
+#ifdef CONFIG_OF
+		.of_match_table = of_match_ptr(bmoca_instance_match),
+#endif
+	},
+};
+
+static int moca_init(void)
+{
+	int ret;
+	memset(minor_tbl, 0, sizeof(minor_tbl));
+	ret = register_chrdev(MOCA_MAJOR, MOCA_CLASS, &moca_fops);
+	if (ret < 0) {
+		pr_err("can't register major %d\n", MOCA_MAJOR);
+		goto bad;
+	}
+
+	moca_class = class_create(THIS_MODULE, MOCA_CLASS);
+	if (IS_ERR(moca_class)) {
+		pr_err("can't create device class\n");
+		ret = PTR_ERR(moca_class);
+		goto bad2;
+	}
+
+	ret = platform_driver_register(&moca_plat_drv);
+	if (ret < 0) {
+		pr_err("can't register platform_driver\n");
+		goto bad3;
+	}
+
+	return 0;
+
+bad3:
+	class_destroy(moca_class);
+bad2:
+	unregister_chrdev(MOCA_MAJOR, MOCA_CLASS);
+bad:
+	return ret;
+}
+
+static void moca_exit(void)
+{
+	platform_driver_unregister(&moca_plat_drv);
+	class_destroy(moca_class);
+	unregister_chrdev(MOCA_MAJOR, MOCA_CLASS);
+}
+
+module_init(moca_init);
+module_exit(moca_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("MoCA messaging driver");
diff --git a/3.8/bmoca.c b/3.8/bmoca.c
index b55a926..471e4ed 100644
--- a/3.8/bmoca.c
+++ b/3.8/bmoca.c
@@ -46,6 +46,7 @@
 #include <linux/clk-provider.h>
 #include <linux/clk/clk-brcmstb.h>
 #include <linux/netdevice.h>
+#include <linux/suspend.h>
 
 #define DRV_VERSION		0x00040000
 #define DRV_BUILD_NUMBER	0x20110831
@@ -124,7 +125,18 @@
 #define M2M_READ		(BIT(30) | BIT(27))
 #endif
 
-#define M2M_TIMEOUT_MS		10
+#define RESET_HIGH_CPU		BIT(0)
+#define RESET_MOCA_SYS		BIT(1)
+#define RESET_LOW_CPU		BIT(2)
+
+#define RESET_GMII		BIT(3)
+#define RESET_PHY_0		BIT(4)
+#define RESET_PHY_1		BIT(5)
+#define DISABLE_CLOCKS		BIT(7)
+#define DISABLE_PHY_0_CLOCK	BIT(8)
+#define DISABLE_PHY_1_CLOCK	BIT(9)
+
+#define M2M_TIMEOUT_MS		100
 
 #define NO_FLUSH_IRQ		0
 #define FLUSH_IRQ		1
@@ -132,6 +144,7 @@
 #define FLUSH_REQRESP_ONLY	3
 
 #define DEFAULT_PHY_CLOCK	(300 * 1000000)
+#define MOCA_SUSPEND_TIMEOUT_MS 300
 
 /* DMA buffers may not share a cache line with anything else */
 #define __DMA_ALIGN__		__aligned(L1_CACHE_BYTES)
@@ -183,7 +196,7 @@
 	struct work_struct	work;
 	void __iomem		*base;
 	void __iomem		*i2c_base;
-	struct platform_device	*enet_pdev;
+	struct device_node	*enet_node;
 
 	unsigned int		mbx_offset[2]; /* indexed by MoCA cpu */
 	struct page		*fw_pages[MAX_FW_PAGES];
@@ -243,6 +256,11 @@
 	unsigned int		host_resp_offset;
 	unsigned int		core_req_offset;
 	unsigned int		core_resp_offset;
+
+	/* for user space suspend/resume notifications */
+	struct notifier_block	pm_notifier;
+	enum moca_pm_states	state;
+	struct completion	suspend_complete;
 };
 
 static const struct moca_regs regs_11_plus = {
@@ -367,6 +385,15 @@
 	u32			scl_param;
 };
 
+static const char * const __maybe_unused moca_state_string[] = {
+	[MOCA_ACTIVE] = "active",
+	[MOCA_SUSPENDING] = "suspending",
+	[MOCA_SUSPENDING_WAITING_ACK] = "suspending waiting for ACK",
+	[MOCA_SUSPENDING_GOT_ACK] = "suspending got ACK",
+	[MOCA_SUSPENDED] = "suspended",
+	[MOCA_RESUMING] = "resuming",
+};
+
 /* support for multiple MoCA devices */
 #define NUM_MINORS		8
 static struct moca_priv_data *minor_tbl[NUM_MINORS];
@@ -490,21 +517,19 @@
 	/* assert resets */
 
 	/* reset CPU first, both CPUs for MoCA 20 HW */
-	if (moca_is_20(priv))
-		MOCA_SET(priv->base + r->sw_reset_offset, 5);
-	else
-		MOCA_SET(priv->base + r->sw_reset_offset, 1);
-
+	MOCA_SET(priv->base + r->sw_reset_offset, RESET_HIGH_CPU |
+		 (moca_is_20(priv) ? RESET_LOW_CPU : 0));
 	MOCA_RD(priv->base + r->sw_reset_offset);
 
 	udelay(20);
 
 	/* reset everything else except clocks */
-	MOCA_SET(priv->base + r->sw_reset_offset, ~(BIT(3) | BIT(7)));
+	MOCA_SET(priv->base + r->sw_reset_offset,
+		 ~(RESET_GMII | DISABLE_CLOCKS));
 	MOCA_RD(priv->base + r->sw_reset_offset);
 
 	/* disable clocks */
-	MOCA_SET(priv->base + r->sw_reset_offset, ~BIT(3));
+	MOCA_SET(priv->base + r->sw_reset_offset, ~RESET_GMII);
 	MOCA_RD(priv->base + r->sw_reset_offset);
 
 	MOCA_WR(priv->base + r->l2_clear_offset, 0xffffffff);
@@ -546,17 +571,18 @@
 
 	if (action == MOCA_ENABLE) {
 		/* deassert moca_sys_reset and clock */
-		MOCA_UNSET(priv->base + r->sw_reset_offset, BIT(1) | BIT(7));
+		MOCA_UNSET(priv->base + r->sw_reset_offset,
+			   RESET_MOCA_SYS | DISABLE_CLOCKS);
 
 		if (priv->hw_rev >= HWREV_MOCA_20_GEN22) {
 			/* Take PHY0 out of reset and enable clock */
 			MOCA_UNSET(priv->base + r->sw_reset_offset,
-				   BIT(4) | BIT(8));
+				   RESET_PHY_0 | DISABLE_PHY_0_CLOCK);
 
 			if (priv->bonded_mode) {
 				/* Take PHY1 out of reset and enable clock */
 				MOCA_UNSET(priv->base + r->sw_reset_offset,
-					   BIT(5) | BIT(9));
+					   RESET_PHY_1 | DISABLE_PHY_1_CLOCK);
 			}
 		}
 		MOCA_RD(priv->base + r->sw_reset_offset);
@@ -663,13 +689,15 @@
 
 	if (moca_is_20(priv)) {
 		if (cpu == 1)
-			MOCA_UNSET(priv->base + r->sw_reset_offset, BIT(0));
+			MOCA_UNSET(priv->base + r->sw_reset_offset,
+				   RESET_HIGH_CPU);
 		else {
 			moca_mmp_init(priv, 1);
-			MOCA_UNSET(priv->base + r->sw_reset_offset, BIT(2));
+			MOCA_UNSET(priv->base + r->sw_reset_offset,
+				   RESET_LOW_CPU);
 		}
 	} else
-		MOCA_UNSET(priv->base + r->sw_reset_offset, BIT(0));
+		MOCA_UNSET(priv->base + r->sw_reset_offset, RESET_HIGH_CPU);
 	MOCA_RD(priv->base + r->sw_reset_offset);
 	return 0;
 }
@@ -679,6 +707,7 @@
 {
 	const struct moca_regs *r = priv->regs;
 	u32 status;
+	long timeout = msecs_to_jiffies(M2M_TIMEOUT_MS);
 
 	MOCA_WR(priv->base + r->m2m_src_offset, src);
 	MOCA_WR(priv->base + r->m2m_dst_offset, dst);
@@ -686,8 +715,7 @@
 	MOCA_RD(priv->base + r->m2m_status_offset);
 	MOCA_WR(priv->base + r->m2m_cmd_offset, ctl);
 
-	if (wait_for_completion_timeout(&priv->copy_complete,
-		1000 * M2M_TIMEOUT_MS) <= 0) {
+	if (wait_for_completion_timeout(&priv->copy_complete, timeout) == 0) {
 		dev_warn(priv->dev, "DMA interrupt timed out, status %x\n",
 			 moca_irq_status(priv, NO_FLUSH_IRQ));
 	}
@@ -832,6 +860,7 @@
 	int pages, i, ret = -EINVAL;
 	struct moca_fw_hdr hdr;
 	u32 bl_chunks;
+	long timeout = msecs_to_jiffies(M2M_TIMEOUT_MS);
 
 	if (copy_from_user(&hdr, (void __user *)(unsigned long)x->buf,
 			sizeof(hdr)))
@@ -858,8 +887,8 @@
 
 	/* wait for an ACK, then write each successive chunk */
 	for (i = bl_chunks + 1; i < pages; i++) {
-		if (wait_for_completion_timeout(&priv->chunk_complete,
-				1000 * M2M_TIMEOUT_MS) <= 0) {
+		if (wait_for_completion_timeout(&priv->chunk_complete, timeout)
+		    == 0) {
 			moca_disable_irq(priv);
 			dev_warn(priv->dev, "chunk ack timed out\n");
 			ret = -EIO;
@@ -872,8 +901,7 @@
 
 	/* wait for ACK of last block.  Older firmware images didn't
 	   ACK the last block, so don't return an error */
-	wait_for_completion_timeout(&priv->chunk_complete,
-			1000 * M2M_TIMEOUT_MS / 10);
+	wait_for_completion_timeout(&priv->chunk_complete, timeout);
 
 out:
 	moca_put_pages(priv, pages);
@@ -1277,6 +1305,47 @@
 	return 0;
 }
 
+/*
+ * The caller is assumed to hold the mutex lock before changing the PM
+ * state
+ */
+static void moca_set_pm_state(struct moca_priv_data *priv,
+			      enum moca_pm_states state)
+{
+	dev_info(priv->dev, "state %s -> %s\n", moca_state_string[priv->state],
+		 moca_state_string[state]);
+	priv->state = state;
+}
+
+static int __maybe_unused moca_send_pm_trap(struct moca_priv_data *priv,
+					     enum moca_pm_states state)
+{
+	struct list_head *ml = NULL;
+	struct moca_core_msg *m;
+
+	ml = moca_detach_head(priv, &priv->core_msg_free_list);
+	if (ml == NULL) {
+		dev_warn(priv->dev, "no entries left on core_msg_free_list\n");
+		return -ENOMEM;
+	}
+
+	if (priv->mmp_20) {
+		/*
+		 * generate an IE_PM_NOTIFICATION trap to the user space
+		 */
+		m = list_entry(ml, struct moca_core_msg, chain);
+		m->data[0] = cpu_to_be32(0x3);
+		m->data[1] = cpu_to_be32(8);
+		m->data[2] = cpu_to_be32(0x11014);
+		m->data[3] = cpu_to_be32(state);
+		m->len = 16;
+		moca_attach_tail(priv, ml, &priv->core_msg_pend_list);
+		wake_up(&priv->core_msg_wq);
+	}
+
+	return 0;
+}
+
 static int moca_get_mbx_offset(struct moca_priv_data *priv)
 {
 	const struct moca_regs *r = priv->regs;
@@ -1480,7 +1549,7 @@
 static int moca_3450_wait(struct moca_priv_data *priv)
 {
 	struct bsc_regs *bsc = priv->i2c_base;
-	long timeout = HZ / 1000;	/* 1ms */
+	long timeout = HZ / 1000; /* 1ms */
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait);
 	int i = 0;
 
@@ -1540,52 +1609,6 @@
 #define BCM3450_PACNTL		0x18
 #define BCM3450_MISC		0x1c
 
-static int moca_3450_get_reg(struct moca_priv_data *priv, unsigned int  *arg)
-{
-	struct moca_xfer x;
-	u32 *dst;
-	u32 val;
-
-	if (!priv->i2c_base)
-		return -ENODEV;
-
-	if (copy_from_user(&x, (void __user *)arg, sizeof(x)))
-		return -EFAULT;
-
-	dst = (u32 *)(unsigned long)x.buf;
-
-	mutex_lock(&priv->moca_i2c_mutex);
-	val = moca_3450_read(priv, x.moca_addr);
-	mutex_unlock(&priv->moca_i2c_mutex);
-
-	if (put_user(val, dst))
-		return -EFAULT;
-
-	return 0;
-}
-
-static int moca_3450_set_reg(struct moca_priv_data *priv, unsigned int  *arg)
-{
-	struct moca_xfer x;
-	u32 val;
-
-	if (!priv->i2c_base)
-		return -ENODEV;
-
-	if (copy_from_user(&x, (void __user *)arg, sizeof(x)))
-		return -EFAULT;
-
-	mutex_lock(&priv->moca_i2c_mutex);
-
-	if (get_user(val, (u32 *)(unsigned long)x.buf))
-		return -EFAULT;
-
-	moca_3450_write(priv, x.moca_addr, val);
-	mutex_unlock(&priv->moca_i2c_mutex);
-
-	return 0;
-}
-
 static void moca_3450_init(struct moca_priv_data *priv, int action)
 {
 	u32 data;
@@ -1792,17 +1815,17 @@
 	info.rf_band = pd->rf_band;
 	info.phy_freq = priv->phy_freq;
 
-	if (priv->enet_pdev && get_device(&priv->enet_pdev->dev)) {
+	if (priv->enet_node) {
 		struct net_device *enet_dev;
+
 		rcu_read_lock();
-		enet_dev = platform_get_drvdata(priv->enet_pdev);
+		enet_dev = of_find_net_device_by_node(priv->enet_node);
 		if (enet_dev) {
 			dev_hold(enet_dev);
 			strlcpy(info.enet_name, enet_dev->name, IFNAMSIZ);
 			dev_put(enet_dev);
 		}
 		rcu_read_unlock();
-		put_device(&priv->enet_pdev->dev);
 		info.enet_id = MOCA_IFNAME_USE_ID;
 	} else {
 		strlcpy(info.enet_name, pd->enet_name, IFNAMSIZ);
@@ -1913,6 +1936,14 @@
 				priv->running = 1;
 		}
 		break;
+#ifdef CONFIG_PM
+	case MOCA_IOCTL_PM_SUSPEND:
+	case MOCA_IOCTL_PM_WOL:
+		if (priv->state == MOCA_SUSPENDING_WAITING_ACK)
+			complete(&priv->suspend_complete);
+		ret = 0;
+		break;
+#endif
 	case MOCA_IOCTL_STOP:
 		moca_msg_reset(priv);
 		moca_3450_init(priv, MOCA_DISABLE);
@@ -1959,13 +1990,6 @@
 			ret = clk_set_rate(priv->phy_clk,
 						     (unsigned int)arg);
 		break;
-	case MOCA_IOCTL_GET_3450_REG:
-		ret = moca_3450_get_reg(priv, (unsigned int *)arg);
-		break;
-	case MOCA_IOCTL_SET_3450_REG:
-		ret = moca_3450_set_reg(priv, (unsigned int *)arg);
-		break;
-
 	}
 	mutex_unlock(&priv->dev_mutex);
 
@@ -2210,7 +2234,7 @@
 {
 	struct platform_device *pdev = priv->pdev;
 	struct moca_platform_data pd;
-	struct device_node *of_node = pdev->dev.of_node, *enet_node;
+	struct device_node *of_node = pdev->dev.of_node;
 	phandle enet_ph;
 	int status = 0, i = 0;
 	const u8 *macaddr;
@@ -2220,6 +2244,29 @@
 	memset(&pd, 0, sizeof(pd));
 
 	/* mandatory entries */
+
+	/* get the common clocks from bmoca node */
+	priv->clk = of_clk_get_by_name(of_node, "sw_moca");
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev,
+			"can't find sw_moca clk\n");
+		priv->clk = NULL;
+	}
+
+	priv->cpu_clk = of_clk_get_by_name(of_node, "sw_moca_cpu");
+	if (IS_ERR(priv->cpu_clk)) {
+		dev_err(&pdev->dev,
+			"can't find sw_moca_cpu clk\n");
+		priv->cpu_clk = NULL;
+	}
+
+	priv->phy_clk = of_clk_get_by_name(of_node, "sw_moca_phy");
+	if (IS_ERR(priv->phy_clk)) {
+		dev_err(&pdev->dev,
+			"can't find sw_moca_phy clk\n");
+		priv->phy_clk = NULL;
+	}
+
 	status = of_property_read_u32(of_node, "hw-rev", &pd.hw_rev);
 	if (status)
 		goto err;
@@ -2227,10 +2274,8 @@
 	status = of_property_read_u32(of_node, "enet-id", &enet_ph);
 	if (status)
 		goto err;
-	enet_node = of_find_node_by_phandle(enet_ph);
-	priv->enet_pdev = of_find_device_by_node(enet_node);
-	of_node_put(enet_node);
-	if (!priv->enet_pdev) {
+	priv->enet_node = of_find_node_by_phandle(enet_ph);
+	if (!priv->enet_node) {
 		dev_err(&pdev->dev,
 			"can't find associated network interface\n");
 		return -EINVAL;
@@ -2269,12 +2314,9 @@
 	of_property_read_u32(of_node, "i2c-addr", &pd.bcm3450_i2c_addr);
 	of_property_read_u32(of_node, "use-dma", &pd.use_dma);
 	of_property_read_u32(of_node, "use-spi", &pd.use_spi);
-
-#if defined(CONFIG_BCM7145A0)
-	pd.chip_id = 0x714500a0;
-#else
-	pd.chip_id = (BRCM_CHIP_ID() << 16) | (BRCM_CHIP_REV() + 0xa0);
-#endif
+	status = of_property_read_u32(of_node, "chip-id", &pd.chip_id);
+	if (status)
+		pd.chip_id = (BRCM_CHIP_ID() << 16) | (BRCM_CHIP_REV() + 0xa0);
 
 	status = platform_device_add_data(pdev, &pd, sizeof(pd));
 err:
@@ -2290,6 +2332,128 @@
 MODULE_DEVICE_TABLE(bmoca, bmoca_instance_match);
 #endif
 
+static int moca_in_reset(struct moca_priv_data *priv)
+{
+	if (MOCA_RD(priv->base + priv->regs->sw_reset_offset)
+	    & RESET_MOCA_SYS) {
+		/*
+		 * If we lost power to the block
+		 * (e.g. unclean S3 transition)
+		 */
+		return 1;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static void moca_prepare_suspend(struct moca_priv_data *priv)
+{
+	int rc;
+	long timeout = msecs_to_jiffies(MOCA_SUSPEND_TIMEOUT_MS);
+
+	mutex_lock(&priv->dev_mutex);
+
+	if (moca_in_reset(priv)) {
+		dev_warn(priv->dev, "MoCA core powered off\n");
+		goto out;
+	}
+
+	switch (priv->state) {
+	case MOCA_ACTIVE:
+		/*
+		 * MoCA is active and online. Set the state to MOCA_SUSPENDING
+		 * and notify the user space daemon to go into hostless mode
+		 */
+		rc = moca_send_pm_trap(priv, MOCA_SUSPENDING);
+		if (rc != 0)
+			goto out;
+
+		moca_set_pm_state(priv, MOCA_SUSPENDING_WAITING_ACK);
+		mutex_unlock(&priv->dev_mutex);
+		/* wait for the ACK from mocad */
+		rc = wait_for_completion_timeout(&priv->suspend_complete,
+						 timeout);
+		if (!rc)
+			dev_err(priv->dev, "suspend timeout\n");
+
+		mutex_lock(&priv->dev_mutex);
+		break;
+	default:
+		dev_warn(priv->dev, "device not in MOCA_ACTIVE state\n");
+		break;
+	}
+
+out:
+	moca_set_pm_state(priv, MOCA_SUSPENDING_GOT_ACK);
+	mutex_unlock(&priv->dev_mutex);
+}
+
+static void moca_complete_resume(struct moca_priv_data *priv)
+{
+	int rc;
+
+	mutex_lock(&priv->dev_mutex);
+	if (moca_in_reset(priv)) {
+		dev_warn(priv->dev, "MoCA core in reset\n");
+		goto out;
+	}
+
+	if (priv->state != MOCA_RESUMING) {
+		dev_warn(priv->dev, "state %s should be %s\n",
+			 moca_state_string[priv->state],
+			 moca_state_string[MOCA_RESUMING]);
+		goto out;
+	}
+
+	/* Send a trap to moca firmware so that mocad resumes in host mode */
+	rc = moca_send_pm_trap(priv, MOCA_ACTIVE);
+	if (rc != 0)
+		dev_warn(priv->dev, "could not send MOCA_ACTIVE trap\n");
+
+out:
+	moca_set_pm_state(priv, MOCA_ACTIVE);
+	mutex_unlock(&priv->dev_mutex);
+}
+
+static int moca_pm_notifier(struct notifier_block *notifier,
+			     unsigned long pm_event,
+			     void *unused)
+{
+	struct moca_priv_data *priv = container_of(notifier,
+						   struct moca_priv_data,
+						   pm_notifier);
+
+	dev_info(priv->dev, "%s for state %lu\n", __func__, pm_event);
+
+	switch (pm_event) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		moca_prepare_suspend(priv);
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+	case PM_POST_RESTORE:
+		moca_complete_resume(priv);
+		break;
+	case PM_RESTORE_PREPARE:
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static int moca_register_pm_notifier(struct moca_priv_data *priv)
+{
+	priv->pm_notifier.notifier_call = moca_pm_notifier;
+	return register_pm_notifier(&priv->pm_notifier);
+}
+
+static int moca_unregister_pm_notifier(struct moca_priv_data *priv)
+{
+	return unregister_pm_notifier(&priv->pm_notifier);
+}
+#endif
+
 static int moca_probe(struct platform_device *pdev)
 {
 	struct moca_priv_data *priv;
@@ -2311,10 +2475,6 @@
 	if (err)
 		goto bad;
 #endif
-	priv->clk = clk_get(&pdev->dev, "moca");
-	priv->cpu_clk = clk_get(&pdev->dev, "moca-cpu");
-	priv->phy_clk = clk_get(&pdev->dev, "moca-phy");
-
 	pd = pdev->dev.platform_data;
 	priv->hw_rev = pd->hw_rev;
 
@@ -2340,6 +2500,7 @@
 	init_waitqueue_head(&priv->core_msg_wq);
 	init_completion(&priv->copy_complete);
 	init_completion(&priv->chunk_complete);
+	init_completion(&priv->suspend_complete);
 
 	spin_lock_init(&priv->list_lock);
 	spin_lock_init(&priv->clock_lock);
@@ -2410,6 +2571,15 @@
 		priv->dev = NULL;
 	}
 
+#ifdef CONFIG_PM
+	err = moca_register_pm_notifier(priv);
+	if (err) {
+		dev_err(&pdev->dev, "register_pm_notifier failed err %d\n",
+			err);
+		goto bad2;
+	}
+#endif
+
 	return 0;
 
 bad2:
@@ -2428,6 +2598,7 @@
 	struct clk *clk = priv->clk;
 	struct clk *phy_clk = priv->phy_clk;
 	struct clk *cpu_clk = priv->cpu_clk;
+	int err = 0;
 
 	if (priv->dev)
 		device_destroy(moca_class, MKDEV(MOCA_MAJOR, priv->minor));
@@ -2438,26 +2609,86 @@
 		iounmap(priv->i2c_base);
 	if (priv->base)
 		iounmap(priv->base);
-	kfree(priv);
 
 	clk_put(cpu_clk);
 	clk_put(phy_clk);
 	clk_put(clk);
 
-	return 0;
+#ifdef CONFIG_PM
+	err = moca_unregister_pm_notifier(priv);
+	if (err) {
+		dev_err(&pdev->dev, "unregister_pm_notifier failed err %d\n",
+			err);
+	}
+#endif
+	kfree(priv);
+
+	return err;
 }
 
 #ifdef CONFIG_PM
 static int moca_suspend(struct device *dev)
 {
-	/* do not do anything on suspend.
-	MoCA core is not necessarily stopped */
+	int minor;
+	for (minor = 0; minor < NUM_MINORS; minor++) {
+		struct moca_priv_data *priv = minor_tbl[minor];
+		if (priv && priv->enabled) {
+			mutex_lock(&priv->dev_mutex);
 
+			if (moca_in_reset(priv)) {
+				moca_set_pm_state(priv, MOCA_SUSPENDED);
+				mutex_unlock(&priv->dev_mutex);
+				return 0;
+			}
+
+			switch (priv->state) {
+			case MOCA_SUSPENDING_GOT_ACK:
+				moca_set_pm_state(priv, MOCA_SUSPENDED);
+				break;
+
+			case MOCA_SUSPENDING:
+			case MOCA_SUSPENDING_WAITING_ACK:
+			default:
+				dev_warn(priv->dev, "state %s should be %s\n",
+				 moca_state_string[priv->state],
+				 moca_state_string[MOCA_SUSPENDING_GOT_ACK]);
+			}
+			mutex_unlock(&priv->dev_mutex);
+		}
+	}
 	return 0;
 }
 
 static int moca_resume(struct device *dev)
 {
+	int minor;
+
+	for (minor = 0; minor < NUM_MINORS; minor++) {
+		struct moca_priv_data *priv = minor_tbl[minor];
+		if (priv && priv->enabled) {
+			if (moca_in_reset(priv)) {
+				/*
+				 * If we lost power to the block
+				 * (e.g. unclean S3 transition), but
+				 * the driver still thinks the core is
+				 * enabled, try to get things back in
+				 * sync.
+				 */
+				priv->enabled = 0;
+				dev_warn(priv->dev, "sending moca reset\n");
+				moca_msg_reset(priv);
+			}
+
+			mutex_lock(&priv->dev_mutex);
+			if (priv->enabled && priv->state != MOCA_SUSPENDED)
+				dev_warn(priv->dev, "state %s should be %s\n",
+					 moca_state_string[priv->state],
+					 moca_state_string[MOCA_SUSPENDED]);
+
+			moca_set_pm_state(priv, MOCA_RESUMING);
+			mutex_unlock(&priv->dev_mutex);
+		}
+	}
 	return 0;
 }
 
diff --git a/bmoca-6802.c b/bmoca-6802.c
new file mode 100644
index 0000000..ecd69e8
--- /dev/null
+++ b/bmoca-6802.c
@@ -0,0 +1,1172 @@
+/*
+    <:copyright-BRCM:2013:DUAL/GPL:standard
+    
+       Copyright (c) 2013 Broadcom Corporation
+       All Rights Reserved
+    
+    Unless you and Broadcom execute a separate written software license
+    agreement governing use of this software, this software is licensed
+    to you under the terms of the GNU General Public License version 2
+    (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+    with the following added to such license:
+    
+       As a special exception, the copyright holders of this software give
+       you permission to link this software with independent modules, and
+       to copy and distribute the resulting executable under terms of your
+       choice, provided that you also meet, for each linked independent
+       module, the terms and conditions of the license of that module.
+       An independent module is a module which is not derived from this
+       software.  The special exception does not apply to any modifications
+       of the software.
+    
+    Not withstanding the above, under no circumstances may you combine
+    this software in any way with any other Broadcom software provided
+    under a license other than the GPL, without Broadcom's express prior
+    written consent.
+    
+    :> 
+
+*/
+
+#include "bbsi.h"
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
+typedef unsigned long uintptr_t;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) */
+
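+/*
+ * Register accessors: when use_spi is set in the platform data, MoCA core
+ * accesses go through the SPI slave interface (kerSysBcmSpiSlave*);
+ * otherwise they are plain memory-mapped reads and writes.
+ */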
+#define MOCA_RD(x)    ((((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0) ? \
+                       (*((volatile uint32_t *)((unsigned long)(x)))) : \
+                       ((uint32_t)kerSysBcmSpiSlaveReadReg32(((struct moca_platform_data *)priv->pdev->dev.platform_data)->devId, (uint32_t)(x))))
+
+#define MOCA_RD8(x, y) ((((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0) ? \
+                        (*(y) = *((volatile unsigned char *)((unsigned long)(x)))) : \
+                        (kerSysBcmSpiSlaveRead(((struct moca_platform_data *)priv->pdev->dev.platform_data)->devId, (unsigned long)(x), y, 1)))
+
+#define MOCA_WR(x,y)   do { ((((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0) ? \
+                            (*((volatile uint32_t *)((unsigned long)(x)))) = (y) : \
+                            kerSysBcmSpiSlaveWriteReg32(((struct moca_platform_data *)priv->pdev->dev.platform_data)->devId, (uint32_t)(x), (y))); } while(0)
+
+#define MOCA_WR8(x,y)    do { ((((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0) ? \
+                               (*((volatile unsigned char *)((unsigned long)(x)))) = (unsigned char)(y) : \
+                               kerSysBcmSpiSlaveWrite(((struct moca_platform_data *)priv->pdev->dev.platform_data)->devId, (unsigned long)(x), (y), 1)); } while(0)
+
+#define MOCA_WR16(x,y)   do { ((((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0) ? \
+                               (*((volatile unsigned short *)((unsigned long)(x)))) = (unsigned short)(y) : \
+                               kerSysBcmSpiSlaveWrite(((struct moca_platform_data *)priv->pdev->dev.platform_data)->devId, (unsigned long)(x), (y), 2)); } while(0)
+
+#define MOCA_WR_BLOCK(addr, src, len) do { kerSysBcmSpiSlaveWriteBuf(((struct moca_platform_data *)priv->pdev->dev.platform_data)->devId, addr, src, len, 4); } while(0)
+#define MOCA_RD_BLOCK(addr, dst, len) do { kerSysBcmSpiSlaveReadBuf(((struct moca_platform_data *)priv->pdev->dev.platform_data)->devId, addr, dst, len, 4); } while(0)
+
+
+#define I2C_RD(x)		MOCA_RD(x)
+#define I2C_WR(x, y)		MOCA_WR(x, y)
+
+#define MOCA_BPCM_NUM         5
+#define MOCA_BPCM_ZONES_NUM   8
+
+#define MOCA_CPU_CLOCK_NUM  1
+#define MOCA_PHY_CLOCK_NUM  2
+
+typedef enum _PMB_COMMAND_E_
+{
+   PMB_COMMAND_PHY1_ON=0,
+   PMB_COMMAND_PARTIAL_ON,
+   PMB_COMMAND_PHY1_OFF,
+   PMB_COMMAND_ALL_OFF,
+
+   PMB_COMMAND_LAST
+} PMB_COMMAND_E;
+
+typedef enum _PMB_GIVE_OWNERSHIP_E_
+{
+   PMB_GIVE_OWNERSHIP_2_HOST = 0,
+   PMB_GIVE_OWNERSHIP_2_FW,
+
+   PMB_GET_OWNERSHIP_LAST
+} PMB_GIVE_OWNERSHIP_E;
+
+struct moca_680x_clk
+{
+	struct device *dev;
+	uint32_t       clock_num;
+};
+
+static uint32_t zone_all_off_bitmask[MOCA_BPCM_NUM] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+static uint32_t zone_partial_on_bitmask[MOCA_BPCM_NUM]  = { 0x41, 0xFC, 0xFF, 0xFF, 0x00 };
+static uint32_t zone_phy1_bitmask[MOCA_BPCM_NUM]  = { 0x00, 0x00, 0x00, 0x00, 0xFF };
+
+
+static void bogus_release(struct device *dev)
+{
+}
+
+static struct moca_platform_data moca_lan_data = {
+	.macaddr_hi =		0x00000102,
+	.macaddr_lo =		0x03040000,
+
+	.bcm3450_i2c_base =  0x10406200,
+	.bcm3450_i2c_addr =  0x70,
+	.hw_rev  =     HWREV_MOCA_20_GEN22,
+	.rf_band =     MOCA_BAND_EXT_D,
+	.chip_id =     0,
+	.use_dma           = 0,
+	.use_spi           = 1,
+	.devId            = MOCA_DEVICE_ID_UNREGISTERED, // Filled in dynamically
+#ifdef CONFIG_SMP
+	.smp_processor_id = 1,
+#endif
+};
+
+static struct resource moca_lan_resources[] = {
+	[0] = {
+		.start = 0x10600000,
+		.end =   0x107ffd97,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = { /* Not used for 6802, define for bmoca */
+		.start = 0,
+		.end = 0,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device moca_lan_plat_dev = {
+	.name = "bmoca",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(moca_lan_resources),
+	.resource = moca_lan_resources,
+	.dev = {
+		.platform_data = &moca_lan_data,
+		.release = bogus_release,
+	},
+};
+
+static struct moca_platform_data moca_wan_data = {
+	.macaddr_hi       = 0x00000102,
+	.macaddr_lo       = 0x03040000,
+
+	.bcm3450_i2c_base =  0x10406200,
+	.bcm3450_i2c_addr =  0x70,
+	.hw_rev  = HWREV_MOCA_20_GEN22,
+	.chip_id = 0,
+	
+	.rf_band = MOCA_BAND_EXT_D,
+
+	.use_dma           = 0,
+	.use_spi           = 1,
+	.devId            = MOCA_DEVICE_ID_UNREGISTERED, // Filled in dynamically
+
+#ifdef CONFIG_SMP
+	.smp_processor_id = 1,
+#endif
+};
+
+static struct resource moca_wan_resources[] = {
+	[0] = {
+		.start = 0x10600000,
+		.end =   0x107ffd97,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = { /* Not used for 6802, define for bmoca */
+		.start = 0,
+		.end = 0,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device moca_wan_plat_dev = {
+	.name          = "bmoca",
+	.id            = 1,
+	.num_resources = ARRAY_SIZE(moca_wan_resources),
+	.resource      = moca_wan_resources,
+	.dev           = {
+		.platform_data = &moca_wan_data,
+		.release       = bogus_release,
+	},
+};
+
+
+/* MoCA Clock Functions */
+struct clk *moca_clk_get(struct device *dev, const char *id)
+{
+	// We're not actually using the "struct clk" for anything
+	// We'll use our own structure
+	struct moca_680x_clk *pclk = kzalloc(sizeof(struct moca_680x_clk), GFP_KERNEL);
+
+	if (!pclk)
+		return NULL;
+
+	pclk->dev = dev;
+
+	if (!strcmp(id, "moca-cpu"))
+		pclk->clock_num = MOCA_CPU_CLOCK_NUM;
+	else if (!strcmp(id, "moca-phy"))
+		pclk->clock_num = MOCA_PHY_CLOCK_NUM;
+	else
+	{
+		kfree(pclk);
+		return(NULL);
+	}
+
+	return((struct clk *)pclk);
+}
+
+int moca_clk_enable(struct clk *clk)
+{
+	return 0;
+}
+
+void moca_clk_disable(struct clk *clk)
+{
+}
+
+void moca_clk_put(struct clk *clk)
+{
+	kfree((struct moca_680x_clk *)clk);
+}
+
+struct moca_6802c0_clock_params
+{
+	uint32_t        cpu_hz;
+	uint32_t        pdiv;
+	uint32_t        ndiv;
+	uint32_t        pll_mdivs[6];
+};
+
+#define NUM_6802C0_CLOCK_OPTIONS 2
+struct moca_6802c0_clock_params moca_6802c0_clock_params[NUM_6802C0_CLOCK_OPTIONS] =
+{
+	{  // VCO of 2200, default
+		440000000,             // cpu_hz
+		1,                     // pdiv
+		44,                    // ndiv
+		{5, 22, 7, 7, 44, 44}  // pll_mdivs[6]
+	},
+	{  // VCO of 2400
+		400000000,             // cpu_hz
+		1,                     // pdiv
+		48,                    // ndiv
+		{6, 24, 8, 8, 48, 48}  // pll_mdivs[6]
+	},
+};
+
+int moca_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	// The MOCA_RD/MOCA_WR macros need a valid 'priv->pdev->dev'
+	static struct moca_priv_data dummy_priv; 
+	static struct platform_device dummy_pd;
+	struct moca_priv_data *priv = &dummy_priv;
+	struct moca_680x_clk * pclk = (struct moca_680x_clk *) clk;
+	struct moca_platform_data * pMocaData = (struct moca_platform_data *)pclk->dev->platform_data;
+	struct moca_6802c0_clock_params * p_clock_data = &moca_6802c0_clock_params[0];
+	uint32_t i;
+	uint32_t addr;
+	uint32_t data;
+	int ret = -1;
+
+	priv->pdev = &dummy_pd;
+	priv->pdev->dev = *pclk->dev;
+
+	if (pclk->clock_num == MOCA_CPU_CLOCK_NUM)
+	{
+		if ((pMocaData->chip_id & 0xFFFFFFF0) == 0x680200C0)
+		{
+			if (rate == 0)
+			{
+				rate = 440000000;
+			}
+			
+			for (i = 0; i < NUM_6802C0_CLOCK_OPTIONS; i++)
+			{
+				if (moca_6802c0_clock_params[i].cpu_hz == rate)
+				{
+					p_clock_data = &moca_6802c0_clock_params[i];
+					ret = 0;
+				}
+			}
+
+			// 1. Set POST_DIVIDER_HOLD_CHx (bit [12] in each PLL_CHANNEL_CTRL_CH_x 
+			//    register)  // this will zero the output channels
+			for (addr = 0x1010003c; addr <= 0x10100050; addr += 4)
+			{
+				MOCA_SET(addr, (1 << 12));
+			}
+
+			//2. Program new PDIV/NDIV value, this will lose lock and 
+			//   trigger a new PLL lock process for a new VCO frequency
+			MOCA_WR(0x10100058, ((p_clock_data->pdiv << 10) | p_clock_data->ndiv));
+
+			//3. Wait >10 usec for lock time // max lock time per data sheet is 460/Fref, 
+			//   Or alternatively monitor CLKGEN_PLL_SYS*_PLL_LOCK_STATUS to check if PLL has locked
+			data = 0;
+			i = 0;
+			while ((data & 0x1) == 0)
+			{
+				/* This typically is only read once */
+				data = MOCA_RD(0x10100060); // CLKGEN_PLL_SYS1_PLL_LOCK_STATUS
+
+				if (i++ > 10)
+				{
+					printk("MoCA SYS1 PLL NOT LOCKED!\n");
+					break;
+				}
+			}
+
+			//4. Configure new MDIV value along with set POST_DIVIDER_LOAD_EN_CHx 
+			//   (bit [13]=1, while keep bit[12]=1) in each PLL_CHANNEL_CTRL_CH_x register
+			i = 0;
+			for (addr = 0x1010003c; addr <= 0x10100050; addr += 4)
+			{
+				data = MOCA_RD(addr);
+				data |= (1 << 13);
+				data &= ~(0xFF << 1);
+				data |= (p_clock_data->pll_mdivs[i] << 1);
+				MOCA_WR(addr, data);
+				i++;
+			}
+
+			//5. Clear bits [12] and bit [13] in each PLL_CHANNEL_CTRL_CH_x
+			for (addr = 0x1010003c; addr <= 0x10100050; addr += 4)
+			{
+				MOCA_UNSET(addr, ((1 << 13) | (1 << 12)));
+			}
+
+		}
+	}
+
+	return(ret);
+}
+
+
+static void moca_enable_irq(struct moca_priv_data *priv)
+{
+	kerSysMocaHostIntrEnable(((struct moca_platform_data *)priv->pdev->dev.platform_data)->devId);
+}
+
+static void moca_disable_irq(struct moca_priv_data *priv)
+{
+	kerSysMocaHostIntrDisable(((struct moca_platform_data *)priv->pdev->dev.platform_data)->devId);
+}
+
+static void moca_pmb_busy_wait(struct moca_priv_data *priv)
+{
+	#if 0
+	uint32_t data;
+
+	/* Possible time saver: The register access time over SPI may 
+	   always be enough to guarantee that the write will complete 
+	   in time without having to check the status. */
+	do
+	{
+		data = MOCA_RD(priv->base + priv->regs->pmb_master_status);
+	} while (data & 0x1);
+	#endif
+}
+
+void moca_pmb_delay(struct moca_priv_data *priv)
+{
+	unsigned int data;
+	int i, j;
+	
+	MOCA_WR(priv->base + priv->regs->pmb_master_wdata_offset, 0xFF444000);
+	
+	for (i = 0; i < MOCA_BPCM_NUM; i++)
+	{
+		for (j = 0; j < MOCA_BPCM_ZONES_NUM; j++)
+		{
+			data = 0x100012 + j*4 + i*0x1000;
+			MOCA_WR(priv->base + priv->regs->pmb_master_cmd_offset, data);
+			moca_pmb_busy_wait(priv);
+		}
+	}
+}
+
+static void moca_pmb_control(struct moca_priv_data *priv, PMB_COMMAND_E cmd)
+{
+	int i, j;
+	uint32_t * p_zone_control;
+	uint32_t data;
+
+	switch (cmd)
+	{
+		case PMB_COMMAND_ALL_OFF:
+			// Turn off zone command
+			MOCA_WR(priv->base + priv->regs->pmb_master_wdata_offset, 0xA00);
+			p_zone_control = &zone_all_off_bitmask[0];
+			break;
+
+		case PMB_COMMAND_PHY1_OFF:
+			// Turn off zone command
+			MOCA_WR(priv->base + priv->regs->pmb_master_wdata_offset, 0xA00);
+			p_zone_control = &zone_phy1_bitmask[0];
+			break;
+		 
+	 case PMB_COMMAND_PHY1_ON:
+			// Turn on zone command
+			MOCA_WR(priv->base + priv->regs->pmb_master_wdata_offset, 0xC00);
+			p_zone_control = &zone_phy1_bitmask[0];
+			break;
+		 
+	 case PMB_COMMAND_PARTIAL_ON:
+			// Turn on zone command
+			MOCA_WR(priv->base + priv->regs->pmb_master_wdata_offset, 0xC00);
+			p_zone_control = &zone_partial_on_bitmask[0];
+			break;
+		 
+		 
+		default:
+			printk(KERN_WARNING "%s: illegal cmd: %08x\n",
+				__func__, cmd);
+			return;
+	}
+
+	for (i = 0; i < MOCA_BPCM_NUM; i++)
+	{
+		for (j = 0; j < MOCA_BPCM_ZONES_NUM; j++)
+		{
+			if (*p_zone_control & (1 << j))
+			{
+				// zone address in bpcms
+				data = (0x1 << 20) + 16 + (i * 4096) + (j * 4);
+				MOCA_WR(priv->base + priv->regs->pmb_master_cmd_offset, data);
+				moca_pmb_busy_wait(priv);
+			}
+		}
+		p_zone_control++;
+	}
+
+}
+
+static void moca_pmb_give_cntrl(struct moca_priv_data *priv, PMB_GIVE_OWNERSHIP_E cmd)
+{
+	int i;
+	uint32_t data;
+
+	/* Pass control over the memories to the FW */
+	MOCA_WR(priv->base + priv->regs->pmb_master_wdata_offset, cmd);
+	for (i = 0; i < 3; i++)
+	{
+		data = 0x100002 + i*0x1000;
+		MOCA_WR(priv->base + priv->regs->pmb_master_cmd_offset, data);   
+		moca_pmb_busy_wait(priv);
+	}
+	moca_pmb_busy_wait(priv);
+}
+
+static void moca_hw_reset(struct moca_priv_data *priv)
+{
+//	unsigned long flags;
+//   uint32_t chipid;
+  
+
+	/* disable and clear all interrupts */
+	MOCA_WR(priv->base + priv->regs->l2_mask_set_offset, 0xffffffff);
+	MOCA_RD(priv->base + priv->regs->l2_mask_set_offset);
+
+	/* assert resets */
+
+	/* reset CPU first, both CPUs for MoCA 20 HW */
+	if (priv->hw_rev == HWREV_MOCA_20_GEN22)
+		MOCA_SET(priv->base + priv->regs->sw_reset_offset, 5);
+	else
+		MOCA_SET(priv->base + priv->regs->sw_reset_offset, 1);
+
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+	udelay(20);
+
+	/* reset everything else except clocks */
+	MOCA_SET(priv->base + priv->regs->sw_reset_offset, 
+		~((1 << 3) | (1 << 7) | (1 << 15) | (1 << 16)));
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+	udelay(20);
+
+	/* disable clocks */
+	MOCA_SET(priv->base + priv->regs->sw_reset_offset, 
+		~((1 << 3) | (1 << 15) | (1 << 16)));
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+	MOCA_WR(priv->base + priv->regs->l2_clear_offset, 0xffffffff);
+	MOCA_RD(priv->base + priv->regs->l2_clear_offset);
+
+	/* Power down all zones */
+	//  The host can't give to itself permission.
+	moca_pmb_control(priv, PMB_COMMAND_ALL_OFF);
+
+	/* Power down all SYS_CTRL memories */
+	MOCA_WR(0x10100068, 1);   // CLKGEN_PLL_SYS1_PLL_PWRDN
+	MOCA_SET(0x1010000c, 1);  // CLKGEN_PLL_SYS0_PLL_CHANNEL_CTRL_CH_3
+
+}
+
+static unsigned int moca_get_phy_freq(struct moca_priv_data *priv)
+{
+	unsigned int x = MOCA_RD(0x10100044); // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_2
+
+	x = (x >> 1) & 0xFF; // Get the MDIV_CH2 field
+
+	if (!x)
+		return 0;
+
+	return(2400 / x); 
+}
+
+
+static void moca_ps_PowerCtrlPHY1(struct moca_priv_data *priv,  PMB_COMMAND_E cmd)
+{
+	uint32_t pll_ctrl_3, pll_ctrl_5, sw_reset; 
+	pll_ctrl_3 = MOCA_RD (0x10100048);
+	pll_ctrl_5 = MOCA_RD (0x10100050);
+	sw_reset = MOCA_RD (priv->base + priv->regs->sw_reset_offset);
+
+	// enable PLL 
+	MOCA_UNSET(0x10100048, 1);  // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_3 
+	MOCA_UNSET(0x10100050, 1);  // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_5 
+
+	udelay(1);
+
+	// de assert moca_phy1_disable_clk
+	MOCA_UNSET(priv->base + priv->regs->sw_reset_offset, (1 << 9));
+
+	moca_pmb_control(priv, cmd);
+
+	MOCA_WR (0x10100048, pll_ctrl_3);
+	MOCA_WR (0x10100050, pll_ctrl_5);
+
+	udelay(1);
+	
+	MOCA_WR (priv->base + priv->regs->sw_reset_offset, sw_reset);	
+}
+
+
+static void moca_gphy_init(struct moca_priv_data *priv)
+{
+	struct moca_platform_data * pMocaData = (struct moca_platform_data *)priv->pdev->dev.platform_data;
+	u32 port_mode;
+	u32 rgmii0_on;
+	u32 rgmii1_on;
+	u32 gphy_enabled = 0;
+
+	port_mode = MOCA_RD(0x10800000) & 0x3;
+	rgmii0_on = MOCA_RD(0x1080000c) & 0x1;
+	rgmii1_on = MOCA_RD(0x10800018) & 0x1;
+
+	if ((pMocaData->chip_id & 0xFFFEFFF0) == 0x680200C0)
+	{
+		if ((port_mode == 0) ||
+		    ((port_mode == 1) && rgmii0_on) ||
+		    ((port_mode == 2) && rgmii1_on))
+		{
+			gphy_enabled = 1;
+		}
+	}
+	else
+	{
+		if ((port_mode == 0) ||
+		    ((port_mode != 3) && rgmii1_on))
+		{
+			gphy_enabled = 1;
+		}
+	}
+
+	if (gphy_enabled)
+	{
+		MOCA_UNSET(0x10800004, 0xF);
+		msleep(10);
+		MOCA_WR(0x1040431c, 0xFFFFFFFF);
+	}
+}
+
+/* called any time we start/restart/stop MoCA */
+static void moca_hw_init(struct moca_priv_data *priv, int action)
+{
+	u32 mask;
+	u32 temp;
+	u32 data;
+	u32 count = 0;
+	struct moca_platform_data * pMocaData = (struct moca_platform_data *)priv->pdev->dev.platform_data;
+
+	if (action == MOCA_ENABLE && !priv->enabled) {
+		moca_clk_enable(priv->clk);
+
+		MOCA_WR(0x1040431c, ~(1 << 26)); // SUN_TOP_CTRL_SW_INIT_0_CLEAR --> Do this at start of sequence, don't touch gphy_sw_init
+		udelay(20);
+		moca_gphy_init(priv);
+   
+		priv->enabled = 1;
+	}
+
+	/* clock not enabled, register accesses will fail with bus error */
+	if (!priv->enabled)
+		return;
+
+	moca_hw_reset(priv);
+	udelay(1);
+
+	if (action == MOCA_ENABLE) {
+
+		/* Power up all zones */
+		moca_pmb_control(priv, PMB_COMMAND_PARTIAL_ON);
+
+		MOCA_UNSET(0x1010000c, 1);  // CLKGEN_PLL_SYS0_PLL_CHANNEL_CTRL_CH_3 
+
+		MOCA_WR(0x1010006C, 1);  // CLKGEN_PLL_SYS1_PLL_RESET 
+		MOCA_WR(0x10100068, 0);  // CLKGEN_PLL_SYS1_PLL_PWRDN 
+		data = 0;
+		while ((data & 0x1) == 0)
+		{
+			/* This typically is only read once */
+			data = MOCA_RD(0x10100060); // CLKGEN_PLL_SYS1_PLL_LOCK_STATUS
+
+			if (count++ > 10)
+				break;
+		}
+		MOCA_WR(0x1010006C, 0);  // CLKGEN_PLL_SYS1_PLL_RESET 
+
+		if (priv->bonded_mode) {
+			MOCA_UNSET(0x10100048, 1);  // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_3 
+			MOCA_UNSET(0x10100050, 1);  // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_5 
+		} else {
+			MOCA_SET(0x10100048, 1);  // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_3 
+			MOCA_SET(0x10100050, 1);  // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_5 
+		}
+		udelay(1);
+
+		/* deassert moca_sys_reset, system clock, phy0, phy0 clock */
+		mask = (1 << 1) | (1 << 7) | (1 << 4) | (1 << 8);
+
+		/* deassert phy1 and phy1 clock in bonded mode */
+		if (priv->bonded_mode)
+			mask |= (1 << 5) | (1 << 9);
+
+		MOCA_UNSET(priv->base + priv->regs->sw_reset_offset, mask);
+		MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+		
+		/* before powering off the memories, set moca_phy1_disable_clk
+		   as needed */
+		if (priv->bonded_mode == 0)
+			moca_ps_PowerCtrlPHY1(priv, PMB_COMMAND_PHY1_OFF);
+		else
+			moca_ps_PowerCtrlPHY1(priv, PMB_COMMAND_PHY1_ON);
+
+		moca_pmb_give_cntrl(priv, PMB_GIVE_OWNERSHIP_2_FW);
+			
+		/* Check for 6802/6803 A0 chip only with Xtal mod */
+		if ((pMocaData->chip_id & 0xFFFEFFFF) == 0x680200A0)
+		{
+			data = MOCA_RD(0x1040401c);
+			if ((data & 0x7) == 0x2) {
+				/* 25MHz */
+				printk("MoCA running with 25MHz XTAL\n");
+				MOCA_WR(priv->base + priv->regs->host2moca_mmp_outbox_0_offset, 1);
+			} else {
+				printk(KERN_INFO "MoCA running with 50MHz XTAL\n");
+				/* 50MHz clock change only */
+				MOCA_WR(priv->base + priv->regs->host2moca_mmp_outbox_0_offset, 0);
+				//Note: The re-configuration is in NDIV_INT, not PDIV.
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS1_PLL_DIV (32'h10100058) [09:00] = 10'd48
+				temp = MOCA_RD(0x10100058);
+				temp = (temp & 0xFFFFFC00) + 48;
+				MOCA_WR(0x10100058, temp);
+
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS0_PLL_DIV (32'h10100018) [09:00] = 10'd40
+				temp = MOCA_RD(0x10100018);
+				temp = (temp & 0xFFFFFC00) + 40;
+				MOCA_WR(0x10100018, temp);
+
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_4 (32'h1010004C) [08:01] = 8'd48
+				temp = MOCA_RD(0x1010004c);
+				temp = (temp & 0xFFFFFE01) + (48 << 1);
+				MOCA_WR(0x1010004c, temp);
+
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_5 (32'h10100050) [08:01] = 8'd48
+				temp = MOCA_RD(0x10100050);
+				temp = (temp & 0xFFFFFE01) + (48 << 1);
+				MOCA_WR(0x10100050, temp);
+
+				// Then Restart the PLL.
+
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS0_PLL_RESET (32'h1010002C) [0] = 1'b1
+				MOCA_SET(0x1010002c, 1);
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS1_PLL_RESET (32'h1010006C) [0] = 1'b1
+				MOCA_SET(0x1010006c, 1);
+
+				udelay(1);
+
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS0_PLL_RESET (32'h1010002C) [0] = 1'b0
+				MOCA_UNSET(0x1010002c, 1);
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS1_PLL_RESET (32'h1010006C) [0] = 1'b0
+				MOCA_UNSET(0x1010006c, 1);
+			}
+		}
+
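+		/*
+		 * Program the SYS1 PLL spread-spectrum coefficients and
+		 * enable SSC mode (bit 22 of the LOW control register).
+		 */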
+		// CLKGEN_PLL_SYS1_PLL_SSC_MODE_CONTROL_HIGH
+		data = MOCA_RD(0x10100070);
+		data = (data & 0xFFFF0000) | 0x7dd;
+		MOCA_WR(0x10100070, data);
+
+		// CLKGEN_PLL_SYS1_PLL_SSC_MODE_CONTROL_LOW
+		data = MOCA_RD(0x10100074);
+		data = (data & 0xffc00000) | 0x3d71;
+		MOCA_WR(0x10100074, data);
+
+		// CLKGEN_PLL_SYS1_PLL_SSC_MODE_CONTROL_LOW
+		MOCA_SET(0x10100074, (1 << 22));
+	}
+
+
+	if (priv->hw_rev <= HWREV_MOCA_20_GEN21) {
+		/* clear junk out of GP0/GP1 */
+		MOCA_WR(priv->base + priv->regs->gp0_offset, 0xffffffff);
+		MOCA_WR(priv->base + priv->regs->gp1_offset, 0x0);
+		/* set up activity LED for 50% duty cycle */
+		MOCA_WR(priv->base + priv->regs->led_ctrl_offset,
+			0x40004000);
+	}
+
+	/* enable DMA completion interrupts */
+	mask = M2H_REQ | M2H_RESP | M2H_ASSERT | M2H_WDT_CPU1 |
+		M2H_NEXTCHUNK | M2H_DMA;
+
+	if (priv->hw_rev >= HWREV_MOCA_20_GEN21)
+		mask |= M2H_WDT_CPU0 | M2H_NEXTCHUNK_CPU0 |
+			M2H_REQ_CPU0 | M2H_RESP_CPU0 | M2H_ASSERT_CPU0;
+
+	MOCA_WR(priv->base + priv->regs->ringbell_offset, 0);
+	MOCA_WR(priv->base + priv->regs->l2_mask_clear_offset, mask);
+	MOCA_RD(priv->base + priv->regs->l2_mask_clear_offset);
+
+
+	/* Set pinmuxing for MoCA interrupt and flow control */
+	MOCA_UNSET(0x10404110, 0xF00000FF);
+	MOCA_SET(0x10404110, 0x10000022);
+ 
+	/* Set pinmuxing for MoCA IIC control */
+	if (((pMocaData->chip_id & 0xFFFFFFF0) == 0x680200C0) || 
+	    ((pMocaData->chip_id & 0xFFFFFFF0) == 0x680300C0))
+	{
+		MOCA_UNSET(0x10404100, 0xFF);  // pin muxing
+		MOCA_SET(0x10404100, 0x22);  // pin muxing
+	}
+
+	MOCA_WR(0x100b0318, 2);
+
+	if (action == MOCA_DISABLE && priv->enabled) {
+		priv->enabled = 0;
+		moca_clk_disable(priv->clk);
+	}
+}
+
+static void moca_ringbell(struct moca_priv_data *priv, uint32_t mask)
+{
+	MOCA_WR(priv->base + priv->regs->ringbell_offset, mask);
+}
+
+static uint32_t moca_start_mips(struct moca_priv_data *priv, unsigned int cpu)
+{
+	if (priv->hw_rev == HWREV_MOCA_20_GEN22) {
+		if (cpu == 1)
+			MOCA_UNSET(priv->base + priv->regs->sw_reset_offset,
+				(1 << 0));
+		else {
+			moca_mmp_init(priv, 1);
+			MOCA_UNSET(priv->base + priv->regs->sw_reset_offset,
+				(1 << 2));
+		}
+	} else
+		MOCA_UNSET(priv->base + priv->regs->sw_reset_offset, (1 << 0));
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+	return(0);
+}
+
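+/*
+ * Kick a single transfer through the MoCA memory-to-memory engine and
+ * busy-wait on the status register until it reports completion. Callers
+ * serialize access with priv->copy_mutex.
+ */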
+static void moca_m2m_xfer(struct moca_priv_data *priv,
+	uint32_t dst, uint32_t src, uint32_t ctl)
+{
+	uint32_t status;
+
+	MOCA_WR(priv->base + priv->regs->m2m_src_offset, src);
+	MOCA_WR(priv->base + priv->regs->m2m_dst_offset, dst);
+	MOCA_WR(priv->base + priv->regs->m2m_status_offset, 0);
+	MOCA_RD(priv->base + priv->regs->m2m_status_offset);
+	MOCA_WR(priv->base + priv->regs->m2m_cmd_offset, ctl);
+
+	do {
+		status = MOCA_RD(priv->base + priv->regs->m2m_status_offset);
+	} while (status == 0);
+}
+
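+/*
+ * Copy a host buffer into the MoCA control memory window: via the M2M
+ * DMA engine when the platform data asks for it, as a block write when
+ * running over SPI, or word by word otherwise. All paths hold
+ * copy_mutex.
+ */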
+static void moca_write_mem(struct moca_priv_data *priv,
+	uint32_t dst_offset, void *src, unsigned int len)
+{
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	if((dst_offset >= priv->regs->cntl_mem_offset+priv->regs->cntl_mem_size) ||
+		((dst_offset + len) > priv->regs->cntl_mem_offset+priv->regs->cntl_mem_size)) {
+		printk(KERN_WARNING "%s: copy past end of cntl memory: %08x\n",
+			__FUNCTION__, dst_offset);
+		return;
+	}
+
+	if ( 1 == pd->use_dma )
+	{
+		dma_addr_t pa;
+
+		pa = dma_map_single(&priv->pdev->dev, src, len, DMA_TO_DEVICE);
+		mutex_lock(&priv->copy_mutex);
+		moca_m2m_xfer(priv, dst_offset + priv->regs->data_mem_offset, (uint32_t)pa, len | M2M_WRITE);
+		mutex_unlock(&priv->copy_mutex);
+		dma_unmap_single(&priv->pdev->dev, pa, len, DMA_TO_DEVICE);
+	}
+	else
+	{
+		uintptr_t addr = (uintptr_t)priv->base + priv->regs->data_mem_offset + dst_offset;
+		uint32_t *data = src;
+		int i;
+
+		mutex_lock(&priv->copy_mutex);
+		if (((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 1)
+		{
+			src = data;
+			MOCA_WR_BLOCK(addr, src, len);
+		}
+		else
+		{
+			for(i = 0; i < len; i += 4, addr += 4, data++)
+				MOCA_WR(addr, *data);
+			MOCA_RD(addr - 4);	/* flush write */
+		}
+
+		mutex_unlock(&priv->copy_mutex);
+	}
+}
+
+static void moca_read_mem(struct moca_priv_data *priv,
+	void *dst, uint32_t src_offset, unsigned int len)
+{
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+    
+	if((src_offset >= priv->regs->cntl_mem_offset+priv->regs->cntl_mem_size) ||
+		((src_offset + len) > priv->regs->cntl_mem_offset+priv->regs->cntl_mem_size)) {
+		printk(KERN_WARNING "%s: copy past end of cntl memory: %08x\n",
+			__FUNCTION__, src_offset);
+		return;
+	}
+
+	if ( 1 == pd->use_dma )
+	{
+		dma_addr_t pa;
+
+		pa = dma_map_single(&priv->pdev->dev, dst, len, DMA_FROM_DEVICE);
+		mutex_lock(&priv->copy_mutex);
+		moca_m2m_xfer(priv, (uint32_t)pa, src_offset + priv->regs->data_mem_offset, len | M2M_READ);
+		mutex_unlock(&priv->copy_mutex);
+		dma_unmap_single(&priv->pdev->dev, pa, len, DMA_FROM_DEVICE);
+	}
+	else
+	{
+		uintptr_t addr = priv->regs->data_mem_offset + src_offset;
+		uint32_t *data = dst;
+		int i;
+
+		mutex_lock(&priv->copy_mutex);
+		if (((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 1)
+		{
+			MOCA_RD_BLOCK((uintptr_t)priv->base + addr, dst, len);
+		}
+		else
+		{
+			for(i = 0; i < len; i += 4, addr += 4, data++)
+				*data = MOCA_RD((uintptr_t)priv->base + addr);
+		}
+		mutex_unlock(&priv->copy_mutex);
+	}
+}
+
+static void moca_write_sg(struct moca_priv_data *priv,
+	uint32_t dst_offset, struct scatterlist *sg, int nents)
+{
+	int j;
+	uintptr_t addr = priv->regs->data_mem_offset + dst_offset;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	dma_map_sg(&priv->pdev->dev, sg, nents, DMA_TO_DEVICE);
+
+	mutex_lock(&priv->copy_mutex);
+	for(j = 0; j < nents; j++)
+	{
+		if ( 1 == pd->use_dma )
+		{
+		    // printk("XXX copying page %d, PA %08x\n", j, (int)sg[j].dma_address);
+			moca_m2m_xfer(priv, addr, (uint32_t)sg[j].dma_address, 
+				sg[j].length | M2M_WRITE);
+
+			addr += sg[j].length;
+		}
+		else
+		{
+			unsigned long *data = (void *)phys_to_virt(sg[j].dma_address);
+         //printk("%s: Writing 0x%lx to addr 0x%08lx (len = %d)\n", __FUNCTION__, *data, ((unsigned long)priv->base) + addr, sg[j].length);
+			MOCA_WR_BLOCK(((unsigned long)priv->base) + addr, data, sg[j].length);
+			addr += sg[j].length;
+		}
+	}
+	mutex_unlock(&priv->copy_mutex);
+
+	dma_unmap_sg(&priv->pdev->dev, sg, nents, DMA_TO_DEVICE);
+}
+
+/* NOTE: this function is not tested */
+#if 0
+static void moca_read_sg(struct moca_priv_data *priv,
+	uint32_t src_offset, struct scatterlist *sg, int nents)
+{
+	int j;
+	uintptr_t addr = priv->data_mem_offset + src_offset;
+
+	dma_map_sg(&priv->pdev->dev, sg, nents, DMA_FROM_DEVICE);
+
+	mutex_lock(&priv->copy_mutex);
+	for(j = 0; j < nents; j++) {
+#if 0 //USE_DMA
+		 printk("XXX copying page %d, PA %08x\n", j, (int)sg[j].dma_address);
+		moca_m2m_xfer(priv, addr, (uint32_t)sg[j].dma_address,
+			sg[j].length | M2M_READ);
+
+		addr += sg[j].length;
+#else
+		uint32_t *data = (void *)phys_to_virt(sg[j].dma_address);
+		unsigned int len = sg[j].length;
+		int i;
+
+		for(i = 0; i < len; i += 4, addr += 4, data++) {
+			*data = cpu_to_be32(
+				MOCA_RD((uintptr_t)priv->base + addr));
+			//printk("MoCA READ: AD 0x%x  = 0x%x (0x%x)\n", (priv->base + addr), MOCA_RD((uintptr_t)priv->base + addr), *data);
+		 }
+#endif
+	}
+	mutex_unlock(&priv->copy_mutex);
+
+	dma_unmap_sg(&priv->pdev->dev, sg, nents, DMA_FROM_DEVICE);
+}
+#endif
+
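+/*
+ * Look up the "mocaN" network interface by name and, if it exists, copy
+ * its MAC address into the hi/lo word pair used by the MoCA core.
+ */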
+static void moca_read_mac_addr(struct moca_priv_data *priv, uint32_t *hi, uint32_t *lo)
+{
+	struct net_device *pdev;
+	char mocaName[7];
+
+	if (priv == NULL)
+		snprintf(mocaName, sizeof(mocaName), "moca%u", 0);
+	else
+		snprintf(mocaName, sizeof(mocaName), "moca%u",
+			((struct moca_platform_data *)priv->pdev->dev.platform_data)->devId);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+	pdev = dev_get_by_name(&init_net, mocaName);
+#else
+	pdev = dev_get_by_name(mocaName);
+#endif
+
+	if ((pdev != NULL) && (lo != NULL) && (hi != NULL))
+		mac_to_u32(hi, lo, pdev->dev_addr);
+
+	if (pdev != NULL)
+		dev_put(pdev);	/* drop the reference taken by dev_get_by_name() */
+}
+
+
+#if defined(DSL_MOCA)
+
+/*
+ * This helper function was added to allow the enet driver to compile in
+ * consumer environment for 68xx profiles.
+ */
+void moca_get_fc_bits(void * arg, unsigned long *moca_fc_reg)
+{
+	struct moca_priv_data *     priv;
+	struct moca_platform_data * pMocaData;
+	unsigned long               flags;
+
+	if (arg == NULL) {
+		return;
+	}
+
+	priv = (struct moca_priv_data *) arg;
+	pMocaData = (struct moca_platform_data *)priv->pdev->dev.platform_data;
+
+	*moca_fc_reg = 0;
+	if (priv != NULL)
+	{
+		/* We can't read moca core regs unless the core's clocks are on. */
+		spin_lock_irqsave(&priv->clock_lock, flags);
+		if (priv->running) {
+			*moca_fc_reg = MOCA_RD(priv->base+priv->regs->sideband_gmii_fc_offset);
+		}
+		spin_unlock_irqrestore(&priv->clock_lock, flags);
+	}
+}
+
+#endif /* DSL_MOCA */
+
+
+//extern void bcmenet_register_moca_fc_bits_cb(void cb(void *, unsigned long *), int isWan, void * arg);
+
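+/*
+ * Memory bring-up for the 680x C0 parts: de-assert the memory resets,
+ * force all zones off through the PMB, clear the force bits, then bring
+ * the zones partially back on.
+ */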
+static void moca_mem_init_680xC0(struct moca_priv_data *priv)
+{
+	/* De-assert reset (all memories are OFF by default:
+	 * Force_SP_off = 1, Force_RF_off = 1) */
+	MOCA_UNSET(priv->base + priv->regs->sw_reset_offset, ((1 << 15) | (1 << 16)));
+
+	moca_pmb_delay(priv);
+	moca_pmb_control(priv, PMB_COMMAND_ALL_OFF);
+
+	/* Write Force_SP_on = 0, Force_SP_off = 0, Force_RF_on = 0, Force_RF_off = 0 */
+	MOCA_UNSET(priv->base + 0x001ffd14, ((1 << 10) | (1 << 11)));
+	moca_pmb_control(priv, PMB_COMMAND_PARTIAL_ON);
+}
+
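+/*
+ * Identify the MoCA chip from the chip-ID register, run the C0 memory
+ * init where applicable, record the hardware revision and power down
+ * the LEAP memories.
+ */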
+static int hw_specific_init(struct moca_priv_data *priv)
+{
+#ifdef DSL_MOCA
+	struct moca_platform_data *pMocaData;
+
+	pMocaData = (struct moca_platform_data *)priv->pdev->dev.platform_data;
+
+	/* read the chip ID and verify this is a supported 6802/6803 part */
+	pMocaData->chip_id = MOCA_RD(0x10404004) + 0xA0;
+	if ((pMocaData->chip_id & 0xFFFE0000) != 0x68020000) { /* 6802 or 6803 */
+		printk(KERN_ERR "bmoca: No MoCA chip found\n");
+		return -EFAULT;
+	}
+
+	if (((pMocaData->chip_id & 0xFFFFFFF0) == 0x680200C0) || ((pMocaData->chip_id & 0xFFFFFFF0) == 0x680300C0))
+	{
+		priv->i2c_base = NULL;
+
+		/* Initialize 680xC0 memory */
+		moca_mem_init_680xC0(priv);
+	}
+
+	pMocaData->hw_rev = HWREV_MOCA_20_GEN22;
+
+	/* Power down all LEAP memories */
+	MOCA_WR(0x101000e4, 0x6); // CLKGEN_LEAP_TOP_INST_DATA   
+	MOCA_WR(0x101000e8, 0x6); // CLKGEN_LEAP_TOP_INST_HAB 
+	MOCA_WR(0x101000ec, 0x6); // CLKGEN_LEAP_TOP_INST_PROG0
+	MOCA_WR(0x101000f0, 0x6); // CLKGEN_LEAP_TOP_INST_PROG1   
+	MOCA_WR(0x101000f4, 0x6); // CLKGEN_LEAP_TOP_INST_PROG2  
+	MOCA_WR(0x101000f8, 0x6); // CLKGEN_LEAP_TOP_INST_ROM
+	MOCA_WR(0x101000fc, 0x6); // CLKGEN_LEAP_TOP_INST_SHARED  
+	MOCA_WR(0x10100164, 0x3); // CLKGEN_SYS_CTRL_INST_POWER_SWITCH_MEMORY 
+
+//	bcmenet_register_moca_fc_bits_cb(
+//		moca_get_fc_bits, pMocaData->use_spi ? 1 : 0, (void *)priv);
+#endif
+
+	return 0;
+}
+
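+/*
+ * Register a platform device for each MoCA interface described in the
+ * board parameters, record its device index, and map the board RF-band
+ * setting onto the bmoca.h MOCA_BAND_* values.
+ */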
+static int moca_platform_dev_register(void)
+{
+	struct moca_platform_data *pMocaData;
+	struct platform_device *pPlatformDev;
+	BP_MOCA_INFO mocaInfo[BP_MOCA_MAX_NUM];
+	int mocaChipNum = BP_MOCA_MAX_NUM;
+	int i;
+	int ret = 0;   
+
+	BpGetMocaInfo(mocaInfo, &mocaChipNum);
+
+	for (i = 0; i < mocaChipNum; i++) {
+		switch (mocaInfo[i].type) {
+			case BP_MOCA_TYPE_WAN:
+				pMocaData = &moca_wan_data;
+				pPlatformDev = &moca_wan_plat_dev;
+				break;
+
+			case BP_MOCA_TYPE_LAN:
+				pMocaData = &moca_lan_data;
+				pPlatformDev = &moca_lan_plat_dev;
+				break;
+
+			default:
+				printk(KERN_ERR "bmoca: unrecognized MoCA type %d\n",
+					mocaInfo[i].type);
+				return -1;
+		}
+
+		ret = platform_device_register(pPlatformDev);
+		if (ret < 0) {
+			return(ret);
+		}
+		else {
+			pMocaData->devId = i;
+
+			/* Map the board params RF Band to the bmoca.h value */
+			switch (mocaInfo[i].rfBand)
+			{
+				case BP_MOCA_RF_BAND_D_LOW:
+					pMocaData->rf_band = MOCA_BAND_D_LOW;
+					break;
+				case BP_MOCA_RF_BAND_D_HIGH:
+					pMocaData->rf_band = MOCA_BAND_D_HIGH;
+					break;
+				case BP_MOCA_RF_BAND_EXT_D:
+					pMocaData->rf_band = MOCA_BAND_EXT_D;
+					break;
+				case BP_MOCA_RF_BAND_E:
+					pMocaData->rf_band = MOCA_BAND_E;
+					break;
+				case BP_MOCA_RF_BAND_F:    
+					pMocaData->rf_band = MOCA_BAND_F;
+					break;
+				default:
+					/* Do nothing */
+					break;
+			}
+			printk(KERN_INFO "bmoca: Found MoCA device %d/%d  RF Band %d\n",
+				i, mocaChipNum, mocaInfo[i].rfBand);
+		}
+	}
+
+	return(ret);
+}
+
+static void moca_platform_dev_unregister(void)
+{
+	if (moca_lan_data.devId != MOCA_DEVICE_ID_UNREGISTERED)
+		platform_device_unregister(&moca_lan_plat_dev);
+
+	if (moca_wan_data.devId != MOCA_DEVICE_ID_UNREGISTERED)
+		platform_device_unregister(&moca_wan_plat_dev);
+}
+
+static void moca_3450_write(struct moca_priv_data *priv, u8 addr, u32 data)
+{
+	/* comment out for now. We don't use i2c on the 63268BHR board */
+#ifdef MOCA_3450_USE_I2C
+	if (((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0)
+		bcm3450_write_reg(addr, data);
+	else
+#endif
+	{
+		if (priv->i2c_base != NULL)
+			moca_3450_write_i2c(priv, addr, data);
+	}
+}
+
+static u32 moca_3450_read(struct moca_priv_data *priv, u8 addr)
+{
+	/* comment out for now. We don't use i2c on the 63268BHR board */
+#ifdef MOCA_3450_USE_I2C
+	if (((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0)
+		return(bcm3450_read_reg(addr));
+	else
+#endif
+	{
+		if (priv->i2c_base != NULL)
+			return(moca_3450_read_i2c(priv, addr));
+		else
+			return(0xffffffff);
+	}
+}
+
diff --git a/bmoca.h b/bmoca.h
index f7d7d69..a0d34ee 100644
--- a/bmoca.h
+++ b/bmoca.h
@@ -62,8 +62,11 @@
 #define MOCA_IOCTL_GET_DRV_INFO	_IOR(MOCA_IOC_MAGIC, 0, struct moca_kdrv_info)
 #define MOCA_IOCTL_SET_CPU_RATE	_IOR(MOCA_IOC_MAGIC, 7, unsigned int)
 #define MOCA_IOCTL_SET_PHY_RATE	_IOR(MOCA_IOC_MAGIC, 8, unsigned int)
-#define MOCA_IOCTL_GET_3450_REG	_IOR(MOCA_IOC_MAGIC, 9, unsigned int)
-#define MOCA_IOCTL_SET_3450_REG	_IOR(MOCA_IOC_MAGIC, 10, unsigned int)
+#define MOCA_IOCTL_GET_3450_REG	_IOR(MOCA_IOC_MAGIC, 9, unsigned int) /* Reserved */
+#define MOCA_IOCTL_SET_3450_REG	_IOR(MOCA_IOC_MAGIC, 10, unsigned int) /* Reserved */
+#define MOCA_IOCTL_PM_SUSPEND   _IO(MOCA_IOC_MAGIC, 11)
+#define MOCA_IOCTL_PM_WOL	_IO(MOCA_IOC_MAGIC, 12)
+#define MOCA_IOCTL_CLK_SSC	_IO(MOCA_IOC_MAGIC, 13)
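+/* 11 and 12 drive the power-management path; 13 toggles spread-spectrum
+ * clocking on the MoCA PLL (see moca_clk_ssc()). */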
 
 #define MOCA_DEVICE_ID_UNREGISTERED  (-1)
 
@@ -132,6 +135,17 @@
 	__u32			boot_flags;
 };
 
+/* MoCA PM states */
+enum moca_pm_states {
+	MOCA_ACTIVE,
+	MOCA_SUSPENDING,
+	MOCA_SUSPENDING_WAITING_ACK,
+	MOCA_SUSPENDING_GOT_ACK,
+	MOCA_SUSPENDED,
+	MOCA_RESUMING,
+	MOCA_NONE
+};
+
 #ifdef __KERNEL__
 
 static inline void mac_to_u32(uint32_t *hi, uint32_t *lo, const uint8_t *mac)