Merge "Extract only GPL-licensed bmoca driver from vendor/broadcom/drivers."
diff --git a/3.3/bmoca.c b/3.3/bmoca.c
new file mode 100644
index 0000000..e8f1829
--- /dev/null
+++ b/3.3/bmoca.c
@@ -0,0 +1,2656 @@
+/*
+ * Copyright (C) 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ <:label-BRCM::GPL:standard
+ :>
+
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <linux/scatterlist.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <net/net_namespace.h>
+
+#define DRV_VERSION		0x00040000
+#define DRV_BUILD_NUMBER	0x20110831
+
+
+#if defined(CONFIG_BRCMSTB)
+
+#define MOCA6816		0
+#undef DSL_MOCA
+#undef CONFIG_BCM_6802_MoCA
+#include <linux/bmoca.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
+#include <linux/brcmstb/brcmstb.h>
+#else
+#include <asm/brcmstb/brcmstb.h>
+#endif
+
+#elif defined(DSL_MOCA)
+
+#define MOCA6816		1
+#include "bmoca.h"
+//#include <boardparms.h>
+//#include <bcm3450.h>
+/* board.h cannot declare spinlock, so do it here */
+extern spinlock_t bcm_gpio_spinlock;
+#include <linux/netdevice.h>
+
+#if defined(CONFIG_BCM_6802_MoCA)
+//#include <board.h>
+#endif
+
+#else
+
+#define MOCA6816		1
+#include <linux/bmoca.h>
+
+#endif
+
+#define MOCA_ENABLE		1
+#define MOCA_DISABLE		0
+
+#define OFF_PKT_REINIT_MEM	0x00a08000
+#define PKT_REINIT_MEM_SIZE	(32 * 1024)
+#define PKT_REINIT_MEM_END	(OFF_PKT_REINIT_MEM  + PKT_REINIT_MEM_SIZE)
+
+/* The mailbox layout is different for MoCA 2.0 compared to
+   MoCA 1.1 */
+
+/* MoCA 1.1 mailbox layout */
+#define HOST_REQ_SIZE_11        304
+#define HOST_RESP_SIZE_11       256
+#define CORE_REQ_SIZE_11        400
+#define CORE_RESP_SIZE_11       64
+
+/* MoCA 1.1 offsets from the mailbox pointer */
+#define HOST_REQ_OFFSET_11      0
+#define HOST_RESP_OFFSET_11     (HOST_REQ_OFFSET_11 + HOST_REQ_SIZE_11)
+#define CORE_REQ_OFFSET_11      (HOST_RESP_OFFSET_11 + HOST_RESP_SIZE_11)
+#define CORE_RESP_OFFSET_11     (CORE_REQ_OFFSET_11 + CORE_REQ_SIZE_11)
+
+/* MoCA 2.0 mailbox layout */
+#define HOST_REQ_SIZE_20        512
+#define HOST_RESP_SIZE_20       512
+#define CORE_REQ_SIZE_20        512
+#define CORE_RESP_SIZE_20       512
+
+/* MoCA 2.0 offsets from the mailbox pointer */
+#define HOST_REQ_OFFSET_20      0
+#define HOST_RESP_OFFSET_20     (HOST_REQ_OFFSET_20 + 0)
+#define CORE_REQ_OFFSET_20      (HOST_RESP_OFFSET_20 + HOST_RESP_SIZE_20)
+#define CORE_RESP_OFFSET_20     (CORE_REQ_OFFSET_20 + 0)
+
+#define HOST_REQ_SIZE_MAX       HOST_REQ_SIZE_20
+#define CORE_REQ_SIZE_MAX       CORE_REQ_SIZE_20
+#define CORE_RESP_SIZE_MAX      CORE_RESP_SIZE_20
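+
+/* Note: in the MoCA 2.0 layout the host request and response buffers share
+ * offset 0, and the core request and response buffers share offset 512,
+ * whereas MoCA 1.1 packs four distinct regions back to back. */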
+
+/* local H2M, M2H buffers */
+#define NUM_CORE_MSG		32
+#define NUM_HOST_MSG		8
+
+#define FW_CHUNK_SIZE		4096
+#define MAX_BL_CHUNKS		8
+#define MAX_FW_SIZE		(1024 * 1024)
+#define MAX_FW_PAGES		((MAX_FW_SIZE >> PAGE_SHIFT) + 1)
+#define MAX_LAB_PRINTF		104
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define M2M_WRITE		((1 << 31) | (1 << 27) | (1 << 28))
+#define M2M_READ		((1 << 30) | (1 << 27) | (1 << 28))
+#else
+#define M2M_WRITE		((1 << 31) | (1 << 27))
+#define M2M_READ		((1 << 30) | (1 << 27))
+#endif
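+
+/* These control words are OR'd with the transfer length and written to the
+ * m2m_cmd register (see moca_m2m_xfer): bit 31 selects a write to MoCA
+ * memory, bit 30 a read; the extra bit set on little-endian hosts presumably
+ * enables byte swapping in the M2M engine. */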
+
+#define M2M_TIMEOUT_MS		10
+
+#define NO_FLUSH_IRQ		0
+#define FLUSH_IRQ		1
+#define FLUSH_DMA_ONLY		2
+#define FLUSH_REQRESP_ONLY	3
+
+#define DEFAULT_PHY_CLOCK       300000000
+
+
+/* DMA buffers may not share a cache line with anything else */
+#define __DMA_ALIGN__		__attribute__((__aligned__(L1_CACHE_BYTES)))
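+
+/* Both the data[] payloads and the list_head members below are cache-line
+ * aligned, so a buffer handed to the M2M engine never shares a line with the
+ * list pointers the CPU keeps touching. */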
+
+struct moca_host_msg {
+	u32			data[HOST_REQ_SIZE_MAX / 4] __DMA_ALIGN__;
+	struct list_head	chain __DMA_ALIGN__;
+	u32			len;
+};
+
+struct moca_core_msg {
+	u32			data[CORE_REQ_SIZE_MAX / 4] __DMA_ALIGN__;
+	struct list_head	chain __DMA_ALIGN__;
+	u32			len;
+};
+
+struct moca_regs {
+	unsigned int	data_mem_offset;
+	unsigned int	data_mem_size;
+	unsigned int	cntl_mem_size;
+	unsigned int	cntl_mem_offset;
+	unsigned int	gp0_offset;
+	unsigned int	gp1_offset;
+	unsigned int	ringbell_offset;
+	unsigned int	l2_status_offset;
+	unsigned int	l2_clear_offset;
+	unsigned int	l2_mask_set_offset;
+	unsigned int	l2_mask_clear_offset;
+	unsigned int	sw_reset_offset;
+	unsigned int	led_ctrl_offset;
+	unsigned int	m2m_src_offset;
+	unsigned int	m2m_dst_offset;
+	unsigned int	m2m_cmd_offset;
+	unsigned int	m2m_status_offset;
+	unsigned int	host2moca_mmp_outbox_0_offset;
+	unsigned int	moca2host_mmp_inbox_0_offset;
+	unsigned int	moca2host_mmp_inbox_1_offset;
+	unsigned int	moca2host_mmp_inbox_2_offset;
+	unsigned int	h2m_resp_bit[2]; /* indexed by cpu */
+	unsigned int	h2m_req_bit[2]; /* indexed by cpu */
+	unsigned int	sideband_gmii_fc_offset;
+#if MOCA6816
+	unsigned int	pmb_master_wdata_offset;
+	unsigned int	pmb_master_cmd_offset;
+	unsigned int	pmb_master_status;
+#endif
+};
+
+struct moca_priv_data {
+	struct platform_device	*pdev;
+	struct device		*dev;
+
+	unsigned int		minor;
+	int			irq;
+	struct work_struct	work;
+	void __iomem		*base;
+	void __iomem		*i2c_base;
+
+	unsigned int		mbx_offset[2]; /* indexed by MoCA cpu */
+	struct page		*fw_pages[MAX_FW_PAGES];
+	struct scatterlist	fw_sg[MAX_FW_PAGES];
+	struct completion	copy_complete;
+	struct completion	chunk_complete;
+
+	struct list_head	host_msg_free_list;
+	struct list_head	host_msg_pend_list;
+	struct moca_host_msg	host_msg_queue[NUM_HOST_MSG] __DMA_ALIGN__;
+	wait_queue_head_t	host_msg_wq;
+
+	struct list_head	core_msg_free_list;
+	struct list_head	core_msg_pend_list;
+	u32		core_resp_buf[CORE_RESP_SIZE_MAX / 4] __DMA_ALIGN__;
+	struct moca_core_msg	core_msg_queue[NUM_CORE_MSG] __DMA_ALIGN__;
+	struct moca_core_msg	core_msg_temp __DMA_ALIGN__;
+	wait_queue_head_t	core_msg_wq;
+
+	spinlock_t		list_lock;
+	spinlock_t		clock_lock;
+	struct mutex		irq_status_mutex;
+	struct mutex		dev_mutex;
+	struct mutex		copy_mutex;
+	struct mutex		moca_i2c_mutex;
+	int			host_mbx_busy;
+	int			host_resp_pending;
+	int			core_req_pending;
+	int			assert_pending;
+	int			wdt_pending;
+
+	int			enabled;
+	int			running;
+	int			wol_enabled;
+	struct clk		*clk;
+	struct clk		*phy_clk;
+	struct clk		*cpu_clk;
+
+	int			refcount;
+	unsigned long		start_time;
+	dma_addr_t		tpcapBufPhys;
+
+	unsigned int		bonded_mode;
+	unsigned int		phy_freq;
+
+	unsigned int		hw_rev;
+	struct moca_regs	*regs;
+
+	/* MMP Parameters */
+	unsigned int	mmp_20;
+	unsigned int	host_req_size;
+	unsigned int	host_resp_size;
+	unsigned int	core_req_size;
+	unsigned int	core_resp_size;
+	unsigned int	host_req_offset;
+	unsigned int	host_resp_offset;
+	unsigned int	core_req_offset;
+	unsigned int	core_resp_offset;
+
+};
+
+#if MOCA6816
+struct moca_regs regs_6802 = {
+	.data_mem_offset = 0,
+	.data_mem_size = (640 * 1024),
+	.cntl_mem_offset = 0x00108000,
+	.cntl_mem_size = (384 * 1024),
+	.gp0_offset = 0,
+	.gp1_offset = 0,
+	.ringbell_offset = 0x001ffd0c,
+	.l2_status_offset = 0x001ffc40,
+	.l2_clear_offset = 0x001ffc48,
+	.l2_mask_set_offset = 0x001ffc50,
+	.l2_mask_clear_offset = 0x001ffc54,
+	.sw_reset_offset = 0x001ffd00,
+	.led_ctrl_offset = 0,
+	.m2m_src_offset = 0x001ffc00,
+	.m2m_dst_offset = 0x001ffc04,
+	.m2m_cmd_offset = 0x001ffc08,
+	.m2m_status_offset = 0x001ffc0c,
+	.host2moca_mmp_outbox_0_offset = 0x001ffd18,
+	.moca2host_mmp_inbox_0_offset = 0x001ffd58,
+	.moca2host_mmp_inbox_1_offset = 0x001ffd5c,
+	.moca2host_mmp_inbox_2_offset = 0x001ffd60,
+	.h2m_resp_bit[1] = 0x10,
+	.h2m_req_bit[1] = 0x20,
+	.h2m_resp_bit[0] = 0x1,
+	.h2m_req_bit[0] = 0x2,
+	.sideband_gmii_fc_offset = 0x001fec18,
+	.pmb_master_status       = 0x001ffcc0,
+	.pmb_master_wdata_offset = 0x001ffcc8,
+	.pmb_master_cmd_offset   = 0x001ffccc
+};
+
+struct moca_regs regs_6816 = {
+	.data_mem_offset = 0,
+	.data_mem_size = (256 * 1024),
+	.cntl_mem_offset = 0x0004c000,
+	.cntl_mem_size = (80 * 1024),
+	.gp0_offset = 0x000a1418,
+	.gp1_offset = 0x000a141c,
+	.ringbell_offset = 0x000a1404,
+	.l2_status_offset = 0x000a2080,
+	.l2_clear_offset = 0x000a2088,
+	.l2_mask_set_offset = 0x000a2090,
+	.l2_mask_clear_offset = 0x000a2094,
+	.sw_reset_offset = 0x000a2040,
+	.led_ctrl_offset = 0x000a204c,
+	.m2m_src_offset = 0x000a2000,
+	.m2m_dst_offset = 0x000a2004,
+	.m2m_cmd_offset = 0x000a2008,
+	.m2m_status_offset = 0x000a200c,
+	.h2m_resp_bit[1] = 0x1,
+	.h2m_req_bit[1] = 0x2,
+	.sideband_gmii_fc_offset = 0x000a1420
+};
+
+#else
+
+struct moca_regs regs_11_plus = {
+	.data_mem_offset = 0,
+	.data_mem_size = (256 * 1024),
+	.cntl_mem_offset = 0x00040000,
+	.cntl_mem_size = (128 * 1024),
+	.gp0_offset = 0x000a2050,
+	.gp1_offset = 0x000a2054,
+	.ringbell_offset = 0x000a2060,
+	.l2_status_offset = 0x000a2080,
+	.l2_clear_offset = 0x000a2088,
+	.l2_mask_set_offset = 0x000a2090,
+	.l2_mask_clear_offset = 0x000a2094,
+	.sw_reset_offset = 0x000a2040,
+	.led_ctrl_offset = 0x000a204c,
+	.m2m_src_offset = 0x000a2000,
+	.m2m_dst_offset = 0x000a2004,
+	.m2m_cmd_offset = 0x000a2008,
+	.m2m_status_offset = 0x000a200c,
+	.h2m_resp_bit[1] = 0x1,
+	.h2m_req_bit[1] = 0x2,
+	.sideband_gmii_fc_offset = 0x000a1420
+};
+
+struct moca_regs regs_11_lite = {
+	.data_mem_offset = 0,
+	.data_mem_size = (96 * 1024),
+	.cntl_mem_offset = 0x0004c000,
+	.cntl_mem_size = (80 * 1024),
+	.gp0_offset = 0x000a2050,
+	.gp1_offset = 0x000a2054,
+	.ringbell_offset = 0x000a2060,
+	.l2_status_offset = 0x000a2080,
+	.l2_clear_offset = 0x000a2088,
+	.l2_mask_set_offset = 0x000a2090,
+	.l2_mask_clear_offset = 0x000a2094,
+	.sw_reset_offset = 0x000a2040,
+	.led_ctrl_offset = 0x000a204c,
+	.m2m_src_offset = 0x000a2000,
+	.m2m_dst_offset = 0x000a2004,
+	.m2m_cmd_offset = 0x000a2008,
+	.m2m_status_offset = 0x000a200c,
+	.h2m_resp_bit[1] = 0x1,
+	.h2m_req_bit[1] = 0x2,
+	.sideband_gmii_fc_offset = 0x000a1420
+};
+
+struct moca_regs regs_11 = {
+	.data_mem_offset = 0,
+	.data_mem_size = (256 * 1024),
+	.cntl_mem_offset = 0x0004c000,
+	.cntl_mem_size = (80 * 1024),
+	.gp0_offset = 0x000a2050,
+	.gp1_offset = 0x000a2054,
+	.ringbell_offset = 0x000a2060,
+	.l2_status_offset = 0x000a2080,
+	.l2_clear_offset = 0x000a2088,
+	.l2_mask_set_offset = 0x000a2090,
+	.l2_mask_clear_offset = 0x000a2094,
+	.sw_reset_offset = 0x000a2040,
+	.led_ctrl_offset = 0x000a204c,
+	.m2m_src_offset = 0x000a2000,
+	.m2m_dst_offset = 0x000a2004,
+	.m2m_cmd_offset = 0x000a2008,
+	.m2m_status_offset = 0x000a200c,
+	.h2m_resp_bit[1] = 0x1,
+	.h2m_req_bit[1] = 0x2,
+	.sideband_gmii_fc_offset = 0x000a1420
+};
+
+struct moca_regs regs_20 = {
+	.data_mem_offset = 0,
+	.data_mem_size = (288 * 1024),
+	.cntl_mem_offset = 0x00120000,
+	.cntl_mem_size = (384 * 1024),
+	.gp0_offset = 0,
+	.gp1_offset = 0,
+	.ringbell_offset = 0x001ffd0c,
+	.l2_status_offset = 0x001ffc40,
+	.l2_clear_offset = 0x001ffc48,
+	.l2_mask_set_offset = 0x001ffc50,
+	.l2_mask_clear_offset = 0x001ffc54,
+	.sw_reset_offset = 0x001ffd00,
+	.led_ctrl_offset = 0,
+	.m2m_src_offset = 0x001ffc00,
+	.m2m_dst_offset = 0x001ffc04,
+	.m2m_cmd_offset = 0x001ffc08,
+	.m2m_status_offset = 0x001ffc0c,
+	.host2moca_mmp_outbox_0_offset = 0x001ffd18,
+	.moca2host_mmp_inbox_0_offset = 0x001ffd58,
+	.moca2host_mmp_inbox_1_offset = 0x001ffd5c,
+	.moca2host_mmp_inbox_2_offset = 0x001ffd60,
+	.h2m_resp_bit[1] = 0x10,
+	.h2m_req_bit[1] = 0x20,
+	.h2m_resp_bit[0] = 0x1,
+	.h2m_req_bit[0] = 0x2,
+	.sideband_gmii_fc_offset = 0x001fec18
+};
+
+#endif
+
+#define MOCA_FW_MAGIC		0x4d6f4341
+
+struct moca_fw_hdr {
+	uint32_t		jump[2];
+	uint32_t		length;
+	uint32_t		cpuid;
+	uint32_t		magic;
+	uint32_t		hw_rev;
+	uint32_t		bl_chunks;
+	uint32_t		res1;
+};
+
+struct bsc_regs {
+	u32			chip_address;
+	u32			data_in[8];
+	u32			cnt_reg;
+	u32			ctl_reg;
+	u32			iic_enable;
+	u32			data_out[8];
+	u32			ctlhi_reg;
+	u32			scl_param;
+};
+
+
+/* support for multiple MoCA devices */
+#define NUM_MINORS		8
+static struct moca_priv_data *minor_tbl[NUM_MINORS];
+static struct class *moca_class;
+
+/* character major device number */
+#define MOCA_MAJOR		234
+#define MOCA_CLASS		"bmoca"
+
+#define M2H_RESP		(1 << 0)
+#define M2H_REQ			(1 << 1)
+#define M2H_ASSERT		(1 << 2)
+#define M2H_NEXTCHUNK		(1 << 3)
+#define M2H_NEXTCHUNK_CPU0	(1 << 4)
+#define M2H_WDT_CPU1			(1 << 10)
+#define M2H_WDT_CPU0			(1 << 6)
+#define M2H_DMA			(1 << 11)
+
+#define M2H_RESP_CPU0	(1 << 13)
+#define M2H_REQ_CPU0		(1 << 14)
+#define M2H_ASSERT_CPU0	(1 << 15)
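+/* The *_CPU0 bits above are only unmasked on MoCA 2.0 parts, which run a
+ * second core; see moca_enable_irq() / moca_disable_irq(). */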
+
+/* does this word contain a NIL byte (i.e. end of string)? */
+#define HAS0(x)			((((x) & 0xff) == 0) || \
+				 (((x) & 0xff00) == 0) || \
+				 (((x) & 0xff0000) == 0) || \
+				 (((x) & 0xff000000) == 0))
+
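+/* read-modify-write helpers for bits in MoCA core registers */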
+#define MOCA_SET(x, y)		do { \
+	MOCA_WR(x, MOCA_RD(x) | (y)); \
+} while (0)
+#define MOCA_UNSET(x, y)	do { \
+	MOCA_WR(x, MOCA_RD(x) & ~(y)); \
+} while (0)
+
+static void moca_3450_write_i2c(struct moca_priv_data *priv, u8 addr, u32 data);
+static u32 moca_3450_read_i2c(struct moca_priv_data *priv, u8 addr);
+static int moca_get_mbx_offset(struct moca_priv_data *priv);
+static u32 moca_irq_status(struct moca_priv_data *priv, int flush);
+
+#define INRANGE(x, a, b)	(((x) >= (a)) && ((x) < (b)))
+
+static inline int moca_range_ok(struct moca_priv_data *priv,
+	unsigned long offset, unsigned long len)
+{
+	unsigned long lastad = offset + len - 1;
+
+	if (lastad < offset)
+		return -EINVAL;
+
+	if (INRANGE(offset, priv->regs->cntl_mem_offset,
+		priv->regs->cntl_mem_offset+priv->regs->cntl_mem_size) &&
+		INRANGE(lastad, priv->regs->cntl_mem_offset,
+		priv->regs->cntl_mem_offset+priv->regs->cntl_mem_size))
+		return 0;
+
+	if (INRANGE(offset, priv->regs->data_mem_offset,
+		priv->regs->data_mem_offset + priv->regs->data_mem_size) &&
+		INRANGE(lastad, priv->regs->data_mem_offset,
+		priv->regs->data_mem_offset + priv->regs->data_mem_size))
+		return 0;
+
+	if (INRANGE(offset, OFF_PKT_REINIT_MEM, PKT_REINIT_MEM_END) &&
+		INRANGE(lastad, OFF_PKT_REINIT_MEM, PKT_REINIT_MEM_END))
+		return 0;
+
+	return -EINVAL;
+}
+
+static void moca_mmp_init(struct moca_priv_data *priv, int is20)
+{
+	if (is20) {
+		priv->host_req_size    = HOST_REQ_SIZE_20;
+		priv->host_resp_size   = HOST_RESP_SIZE_20;
+		priv->core_req_size    = CORE_REQ_SIZE_20;
+		priv->core_resp_size   = CORE_RESP_SIZE_20;
+		priv->host_req_offset  = HOST_REQ_OFFSET_20;
+		priv->host_resp_offset = HOST_RESP_OFFSET_20;
+		priv->core_req_offset  = CORE_REQ_OFFSET_20;
+		priv->core_resp_offset = CORE_RESP_OFFSET_20;
+		priv->mmp_20 = 1;
+	} else {
+		priv->host_req_size    = HOST_REQ_SIZE_11;
+		priv->host_resp_size   = HOST_RESP_SIZE_11;
+		priv->core_req_size    = CORE_REQ_SIZE_11;
+		priv->core_resp_size   = CORE_RESP_SIZE_11;
+		priv->host_req_offset  = HOST_REQ_OFFSET_11;
+		priv->host_resp_offset = HOST_RESP_OFFSET_11;
+		priv->core_req_offset  = CORE_REQ_OFFSET_11;
+		priv->core_resp_offset = CORE_RESP_OFFSET_11;
+		priv->mmp_20 = 0;
+	}
+}
+
+static int moca_is_20(struct moca_priv_data *priv)
+{
+	return ((priv->hw_rev & MOCA_PROTVER_MASK) == MOCA_PROTVER_20);
+}
+
+#ifdef CONFIG_BRCM_MOCA_BUILTIN_FW
+#error Not supported in this version
+#else
+static const char *bmoca_fw_image;
+#endif
+
+#if MOCA6816
+#if defined(CONFIG_BCM_6802_MoCA)
+#include "bmoca-6802.c"
+#else
+#include "bmoca-6816.c"
+#endif
+#else
+
+/*
+ * LOW-LEVEL DEVICE OPERATIONS
+ */
+
+#define MOCA_RD(x)		__raw_readl((void __iomem *)(x))
+#define MOCA_WR(x, y)		__raw_writel((y), (void __iomem *)(x))
+
+#define I2C_RD(x)		MOCA_RD(x)
+#define I2C_WR(x, y)		MOCA_WR(x, y)
+
+static int hw_specific_init(struct moca_priv_data *priv)
+{
+	return 0;
+}
+
+static void moca_hw_reset(struct moca_priv_data *priv)
+{
+	/* disable and clear all interrupts */
+	MOCA_WR(priv->base + priv->regs->l2_mask_set_offset, 0xffffffff);
+	MOCA_RD(priv->base + priv->regs->l2_mask_set_offset);
+
+	/* assert resets */
+
+	/* reset CPU first, both CPUs for MoCA 20 HW */
+	if (moca_is_20(priv))
+		MOCA_SET(priv->base + priv->regs->sw_reset_offset, 5);
+	else
+		MOCA_SET(priv->base + priv->regs->sw_reset_offset, 1);
+
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+	udelay(20);
+
+	/* reset everything else except clocks */
+	MOCA_SET(priv->base + priv->regs->sw_reset_offset,
+		~((1 << 3) | (1 << 7)));
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+	udelay(20);
+
+	/* disable clocks */
+	MOCA_SET(priv->base + priv->regs->sw_reset_offset, ~(1 << 3));
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+	MOCA_WR(priv->base + priv->regs->l2_clear_offset, 0xffffffff);
+	MOCA_RD(priv->base + priv->regs->l2_clear_offset);
+}
+
+/* called any time we start/restart/stop MoCA */
+static void moca_hw_init(struct moca_priv_data *priv, int action)
+{
+	if (action == MOCA_ENABLE && !priv->enabled) {
+		clk_enable(priv->clk);
+		clk_enable(priv->phy_clk);
+		clk_enable(priv->cpu_clk);
+		priv->enabled = 1;
+	}
+
+	/* clock not enabled, register accesses will fail with bus error */
+	if (!priv->enabled)
+		return;
+
+	moca_hw_reset(priv);
+	udelay(1);
+
+	if (action == MOCA_ENABLE) {
+		/* deassert moca_sys_reset and clock */
+		MOCA_UNSET(priv->base + priv->regs->sw_reset_offset,
+			(1 << 1) | (1 << 7));
+
+		if (priv->hw_rev >= HWREV_MOCA_20_GEN22) {
+			/* Take PHY0 out of reset and enable clock */
+			MOCA_UNSET(priv->base + priv->regs->sw_reset_offset,
+				(1<<4) | (1<<8));
+
+			if (priv->bonded_mode) {
+				/* Take PHY1 out of reset and enable clock */
+				MOCA_UNSET(priv->base +
+					priv->regs->sw_reset_offset,
+					(1<<5) | (1<<9));
+			}
+		}
+		MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+	}
+
+
+	if (!moca_is_20(priv)) {
+		/* clear junk out of GP0/GP1 */
+		MOCA_WR(priv->base + priv->regs->gp0_offset, 0xffffffff);
+		MOCA_WR(priv->base + priv->regs->gp1_offset, 0x0);
+		/* set up activity LED for 50% duty cycle */
+		MOCA_WR(priv->base + priv->regs->led_ctrl_offset,
+			0x40004000);
+	}
+
+	/* enable DMA completion interrupts */
+	MOCA_WR(priv->base + priv->regs->ringbell_offset, 0);
+	MOCA_WR(priv->base + priv->regs->l2_mask_clear_offset, M2H_DMA);
+	MOCA_RD(priv->base + priv->regs->l2_mask_clear_offset);
+
+	if (action == MOCA_DISABLE && priv->enabled) {
+		priv->enabled = 0;
+		clk_disable(priv->clk);
+		clk_disable(priv->phy_clk);
+		clk_disable(priv->cpu_clk);
+	}
+}
+
+static void moca_ringbell(struct moca_priv_data *priv, u32 mask)
+{
+	MOCA_WR(priv->base + priv->regs->ringbell_offset, mask);
+	MOCA_RD(priv->base + priv->regs->ringbell_offset);
+}
+
+static void moca_enable_irq(struct moca_priv_data *priv)
+{
+	/* unmask everything */
+	u32 mask = M2H_REQ | M2H_RESP | M2H_ASSERT | M2H_WDT_CPU1 |
+		M2H_NEXTCHUNK | M2H_DMA;
+
+	if (!priv->enabled)
+		return;
+
+	if (moca_is_20(priv))
+		mask |= M2H_WDT_CPU0 | M2H_NEXTCHUNK_CPU0 |
+			M2H_REQ_CPU0 | M2H_RESP_CPU0 | M2H_ASSERT_CPU0;
+
+	MOCA_WR(priv->base + priv->regs->l2_mask_clear_offset, mask);
+	MOCA_RD(priv->base + priv->regs->l2_mask_clear_offset);
+}
+
+static void moca_disable_irq(struct moca_priv_data *priv)
+{
+	/* mask everything except DMA completions */
+	u32 mask = M2H_REQ | M2H_RESP | M2H_ASSERT | M2H_WDT_CPU1 |
+		M2H_NEXTCHUNK;
+
+	if (moca_is_20(priv))
+		mask |= M2H_WDT_CPU0 | M2H_NEXTCHUNK_CPU0 |
+			M2H_REQ_CPU0 | M2H_RESP_CPU0 | M2H_ASSERT_CPU0;
+
+	MOCA_WR(priv->base + priv->regs->l2_mask_set_offset, mask);
+	MOCA_RD(priv->base + priv->regs->l2_mask_set_offset);
+}
+
+static u32 moca_start_mips(struct moca_priv_data *priv, u32 cpu)
+{
+	if (moca_is_20(priv)) {
+		if (cpu == 1)
+			MOCA_UNSET(priv->base + priv->regs->sw_reset_offset,
+				(1 << 0));
+		else {
+			moca_mmp_init(priv, 1);
+			MOCA_UNSET(priv->base + priv->regs->sw_reset_offset,
+				(1 << 2));
+		}
+	} else
+		MOCA_UNSET(priv->base + priv->regs->sw_reset_offset, (1 << 0));
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+	return 0;
+}
+
+static void moca_m2m_xfer(struct moca_priv_data *priv,
+	u32 dst, u32 src, u32 ctl)
+{
+	u32 status;
+
+	MOCA_WR(priv->base + priv->regs->m2m_src_offset, src);
+	MOCA_WR(priv->base + priv->regs->m2m_dst_offset, dst);
+	MOCA_WR(priv->base + priv->regs->m2m_status_offset, 0);
+	MOCA_RD(priv->base + priv->regs->m2m_status_offset);
+	MOCA_WR(priv->base + priv->regs->m2m_cmd_offset, ctl);
+
+	if (wait_for_completion_timeout(&priv->copy_complete,
+		1000 * M2M_TIMEOUT_MS) <= 0) {
+		printk(KERN_WARNING "%s: DMA interrupt timed out, status %x\n",
+			__func__, moca_irq_status(priv, NO_FLUSH_IRQ));
+	}
+
+	status = MOCA_RD(priv->base + priv->regs->m2m_status_offset);
+
+	if (status & (3 << 29))
+		printk(KERN_WARNING "%s: bad status %08x "
+			"(s/d/c %08x %08x %08x)\n", __func__,
+			status, src, dst, ctl);
+}
+
+static void moca_write_mem(struct moca_priv_data *priv,
+	u32 dst_offset, void *src, unsigned int len)
+{
+	dma_addr_t pa;
+
+	if (moca_range_ok(priv, dst_offset, len) < 0) {
+		printk(KERN_WARNING "%s: copy past end of cntl memory: %08x\n",
+			__func__, dst_offset);
+		return;
+	}
+
+	pa = dma_map_single(&priv->pdev->dev, src, len, DMA_TO_DEVICE);
+	moca_m2m_xfer(priv, dst_offset + priv->regs->data_mem_offset, (u32)pa,
+		len | M2M_WRITE);
+	dma_unmap_single(&priv->pdev->dev, pa, len, DMA_TO_DEVICE);
+}
+
+static void moca_read_mem(struct moca_priv_data *priv,
+	void *dst, u32 src_offset, unsigned int len)
+{
+	int i;
+
+	if (moca_range_ok(priv, src_offset, len) < 0) {
+		printk(KERN_WARNING "%s: copy past end of cntl memory: %08x\n",
+			__func__, src_offset);
+		return;
+	}
+
+	for (i = 0; i < len; i += 4)
+		DEV_WR(dst + i, cpu_to_be32(
+			MOCA_RD(priv->base + src_offset +
+				priv->regs->data_mem_offset + i)));
+}
+
+static void moca_write_sg(struct moca_priv_data *priv,
+	u32 dst_offset, struct scatterlist *sg, int nents)
+{
+	int j;
+	uintptr_t addr = priv->regs->data_mem_offset + dst_offset;
+
+	dma_map_sg(&priv->pdev->dev, sg, nents, DMA_TO_DEVICE);
+
+	for (j = 0; j < nents; j++) {
+		moca_m2m_xfer(priv, addr, (u32)sg[j].dma_address,
+			sg[j].length | M2M_WRITE);
+
+		addr += sg[j].length;
+	}
+
+	dma_unmap_sg(&priv->pdev->dev, sg, nents, DMA_TO_DEVICE);
+}
+
+static inline void moca_read_sg(struct moca_priv_data *priv,
+	u32 src_offset, struct scatterlist *sg, int nents)
+{
+	int j;
+	uintptr_t addr = priv->regs->data_mem_offset + src_offset;
+
+	dma_map_sg(&priv->pdev->dev, sg, nents, DMA_FROM_DEVICE);
+
+	for (j = 0; j < nents; j++) {
+		moca_m2m_xfer(priv, (u32)sg[j].dma_address, addr,
+			sg[j].length | M2M_READ);
+
+		addr += sg[j].length;
+		SetPageDirty(sg_page(&sg[j]));
+	}
+
+	dma_unmap_sg(&priv->pdev->dev, sg, nents, DMA_FROM_DEVICE);
+}
+
+#define moca_3450_write moca_3450_write_i2c
+#define moca_3450_read moca_3450_read_i2c
+#endif
+
+/* Can be called from MoCA ISR */
+static u32 moca_irq_status_no_lock(struct moca_priv_data *priv, int flush)
+{
+	u32 stat;
+	u32 dma_mask = M2H_DMA | M2H_NEXTCHUNK;
+
+	if (moca_is_20(priv))
+		dma_mask |= M2H_NEXTCHUNK_CPU0;
+
+	stat = MOCA_RD(priv->base + priv->regs->l2_status_offset);
+
+	if (flush == FLUSH_IRQ) {
+		MOCA_WR(priv->base + priv->regs->l2_clear_offset, stat);
+		MOCA_RD(priv->base + priv->regs->l2_clear_offset);
+	}
+	if (flush == FLUSH_DMA_ONLY) {
+		MOCA_WR(priv->base + priv->regs->l2_clear_offset,
+			stat & dma_mask);
+		MOCA_RD(priv->base + priv->regs->l2_clear_offset);
+	}
+	if (flush == FLUSH_REQRESP_ONLY) {
+		MOCA_WR(priv->base + priv->regs->l2_clear_offset,
+			stat & (M2H_RESP | M2H_REQ |
+			M2H_RESP_CPU0 | M2H_REQ_CPU0));
+		MOCA_RD(priv->base + priv->regs->l2_clear_offset);
+	}
+
+	return stat;
+}
+
+/* Must have MoCA ISR disabled (moca_disable_irq) to call */
+static u32 moca_irq_status(struct moca_priv_data *priv, int flush)
+{
+	u32 stat;
+
+	mutex_lock(&priv->irq_status_mutex);
+
+	stat = moca_irq_status_no_lock(priv, flush);
+
+	mutex_unlock(&priv->irq_status_mutex);
+
+	return stat;
+}
+
+static void moca_put_pages(struct moca_priv_data *priv, int pages)
+{
+	int i;
+
+	for (i = 0; i < pages; i++)
+		page_cache_release(priv->fw_pages[i]);
+}
+
+static int moca_get_pages(struct moca_priv_data *priv, unsigned long addr,
+	int size, unsigned int moca_addr, int write)
+{
+	unsigned int pages, chunk_size;
+	int ret, i;
+
+	if (addr & 3)
+		return -EINVAL;
+	if ((size <= 0) || (size > MAX_FW_SIZE))
+		return -EINVAL;
+
+	pages = ((addr & ~PAGE_MASK) + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	down_read(&current->mm->mmap_sem);
+	ret = get_user_pages(current, current->mm, addr & PAGE_MASK, pages,
+		write, 0, priv->fw_pages, NULL);
+	up_read(&current->mm->mmap_sem);
+
+	if (ret < 0)
+		return ret;
+	BUG_ON((ret > MAX_FW_PAGES) || (pages == 0));
+
+	if (ret < pages) {
+		printk(KERN_WARNING "%s: get_user_pages returned %d, "
+			"expecting %d\n", __func__, ret, pages);
+		moca_put_pages(priv, ret);
+		return -EFAULT;
+	}
+
+	chunk_size = PAGE_SIZE - (addr & ~PAGE_MASK);
+	if (size < chunk_size)
+		chunk_size = size;
+
+	sg_set_page(&priv->fw_sg[0], priv->fw_pages[0], chunk_size,
+		addr & ~PAGE_MASK);
+	size -= chunk_size;
+
+	for (i = 1; i < pages; i++) {
+		sg_set_page(&priv->fw_sg[i], priv->fw_pages[i],
+			size > PAGE_SIZE ? PAGE_SIZE : size, 0);
+		size -= PAGE_SIZE;
+	}
+	return ret;
+}
+
+static int moca_write_img(struct moca_priv_data *priv, struct moca_xfer *x)
+{
+	int pages, i, ret = -EINVAL;
+	struct moca_fw_hdr hdr;
+	u32 bl_chunks;
+
+	if (copy_from_user(&hdr, (void __user *)(unsigned long)x->buf,
+			sizeof(hdr)))
+		return -EFAULT;
+
+	bl_chunks = be32_to_cpu(hdr.bl_chunks);
+	if (!bl_chunks || (bl_chunks > MAX_BL_CHUNKS))
+		bl_chunks = 1;
+
+	pages = moca_get_pages(priv, (unsigned long)x->buf, x->len, 0, 0);
+	if (pages < 0)
+		return pages;
+	if (pages < (bl_chunks + 2))
+		goto out;
+
+	/* host must use FW_CHUNK_SIZE MMU pages (for now) */
+	BUG_ON(FW_CHUNK_SIZE != PAGE_SIZE);
+
+	/* write the first two chunks, then start the MIPS */
+	moca_write_sg(priv, 0, &priv->fw_sg[0], bl_chunks + 1);
+
+#if defined(CONFIG_BCM_6802_MoCA)
+	/* 6802 doesn't need a handshake between blocks, the timing
+		is guaranteed.  Eliminating the handshake cuts the time
+		required to load firmware */
+	moca_start_mips(priv, be32_to_cpu(hdr.cpuid));
+	udelay(5);
+
+	for (i = bl_chunks + 1; i < pages; i++) {
+		moca_write_sg(priv,
+			priv->regs->data_mem_offset + FW_CHUNK_SIZE * bl_chunks,
+			&priv->fw_sg[i], 1);
+	}
+
+	moca_enable_irq(priv);
+
+#else
+
+	moca_enable_irq(priv);
+	moca_start_mips(priv, be32_to_cpu(hdr.cpuid));
+	/* wait for an ACK, then write each successive chunk */
+	for (i = bl_chunks + 1; i < pages; i++) {
+		if (wait_for_completion_timeout(&priv->chunk_complete,
+			1000 * M2M_TIMEOUT_MS) <= 0) {
+			moca_disable_irq(priv);
+			printk(KERN_WARNING "%s: chunk ack timed out\n",
+				__func__);
+			ret = -EIO;
+			goto out;
+		}
+
+		moca_write_sg(priv,
+			priv->regs->data_mem_offset + FW_CHUNK_SIZE * bl_chunks,
+			&priv->fw_sg[i], 1);
+	}
+
+	/* wait for ACK of last block.  Older firmware images didn't
+	   ACK the last block, so don't return an error */
+	wait_for_completion_timeout(&priv->chunk_complete,
+			1000 * M2M_TIMEOUT_MS / 10);
+
+#endif
+
+	ret = 0;
+
+out:
+	moca_put_pages(priv, pages);
+	return ret;
+}
+
+/*
+ * MESSAGE AND LIST HANDLING
+ */
+
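+/* lab_printf traps carry a {pointer, length} pair into MoCA core memory;
+ * pull the string across with moca_read_mem() so userspace receives the
+ * text inline in the trap message. */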
+static void moca_handle_lab_printf(struct moca_priv_data *priv,
+	struct moca_core_msg *m)
+{
+	u32 str_len;
+	u32 str_addr;
+
+	if (priv->mmp_20) {
+		str_len = (be32_to_cpu(m->data[4]) + 3) & ~3;
+		str_addr = be32_to_cpu(m->data[3]) & 0x1fffffff;
+
+		if ((be32_to_cpu(m->data[0]) == 0x3) &&
+		    (be32_to_cpu(m->data[1]) == 12) &&
+		    ((be32_to_cpu(m->data[2]) & 0xffffff) == 0x090801) &&
+		    (be32_to_cpu(m->data[4]) <= MAX_LAB_PRINTF)) {
+			m->len = 3 + str_len;
+			moca_read_mem(priv, &m->data[3], str_addr, str_len);
+
+			m->data[1] = cpu_to_be32(m->len - 8);
+		}
+	} else {
+		str_len = (be32_to_cpu(m->data[3]) + 3) & ~3;
+		str_addr = be32_to_cpu(m->data[2]) & 0x1fffffff;
+
+		if ((be32_to_cpu(m->data[0]) & 0xff0000ff) == 0x09000001 &&
+			be32_to_cpu(m->data[1]) == 0x600b0008 &&
+			(be32_to_cpu(m->data[3]) <= MAX_LAB_PRINTF)) {
+
+			m->len = 8 + str_len;
+			moca_read_mem(priv, &m->data[2], str_addr, str_len);
+
+			m->data[1] = cpu_to_be32((MOCA_IE_DRV_PRINTF << 16) +
+				m->len - 8);
+		}
+	}
+}
+
+static void moca_msg_reset(struct moca_priv_data *priv)
+{
+	int i;
+
+	if (priv->running)
+		moca_disable_irq(priv);
+	priv->running = 0;
+	priv->host_mbx_busy = 0;
+	priv->host_resp_pending = 0;
+	priv->core_req_pending = 0;
+	priv->assert_pending = 0;
+	priv->mbx_offset[0] = -1;
+	priv->mbx_offset[1] = -1;
+
+	spin_lock_bh(&priv->list_lock);
+	INIT_LIST_HEAD(&priv->core_msg_free_list);
+	INIT_LIST_HEAD(&priv->core_msg_pend_list);
+
+	for (i = 0; i < NUM_CORE_MSG; i++)
+		list_add_tail(&priv->core_msg_queue[i].chain,
+			&priv->core_msg_free_list);
+
+	INIT_LIST_HEAD(&priv->host_msg_free_list);
+	INIT_LIST_HEAD(&priv->host_msg_pend_list);
+
+	for (i = 0; i < NUM_HOST_MSG; i++)
+		list_add_tail(&priv->host_msg_queue[i].chain,
+			&priv->host_msg_free_list);
+	spin_unlock_bh(&priv->list_lock);
+}
+
+static struct list_head *moca_detach_head(struct moca_priv_data *priv,
+	struct list_head *h)
+{
+	struct list_head *r = NULL;
+
+	spin_lock_bh(&priv->list_lock);
+	if (!list_empty(h)) {
+		r = h->next;
+		list_del(r);
+	}
+	spin_unlock_bh(&priv->list_lock);
+
+	return r;
+}
+
+static void moca_attach_tail(struct moca_priv_data *priv,
+	struct list_head *elem, struct list_head *list)
+{
+	spin_lock_bh(&priv->list_lock);
+	list_add_tail(elem, list);
+	spin_unlock_bh(&priv->list_lock);
+}
+
+/* Must have dev_mutex when calling this function */
+static int moca_recvmsg(struct moca_priv_data *priv, uintptr_t offset,
+	u32 max_size, uintptr_t reply_offset, u32 cpuid)
+{
+	struct list_head *ml = NULL;
+	struct moca_core_msg *m;
+	unsigned int w, rw, num_ies;
+	u32 data;
+	char *msg;
+	int err = -ENOMEM;
+	u32 *reply = priv->core_resp_buf;
+	int attach = 1;
+	u32 size;
+
+	m = &priv->core_msg_temp;
+
+	BUG_ON((uintptr_t)m->data & (L1_CACHE_BYTES - 1));
+
+	/* make sure we have the mailbox offset before using it */
+	if (moca_get_mbx_offset(priv)) {
+		err = -EIO;
+		msg = "no mailbox";
+		goto bad;
+	}
+
+	/* read only as much as is necessary.
+	   The second word is the length for mmp_20 */
+	if (priv->mmp_20) {
+		moca_read_mem(priv, m->data,
+			offset + priv->mbx_offset[cpuid], 8);
+
+		size = (be32_to_cpu(m->data[1])+3) & 0xFFFFFFFC;
+		/* if size is too large, this is a protocol error.
+		   mocad will output the error message */
+		if (size > max_size - 8)
+			size = max_size - 8;
+
+		moca_read_mem(priv, &m->data[2],
+			offset + priv->mbx_offset[cpuid] + 8, size);
+	} else
+		moca_read_mem(priv, m->data,
+			offset + priv->mbx_offset[cpuid], max_size);
+
+	data = be32_to_cpu(m->data[0]);
+
+	if (priv->mmp_20) {
+		/* In MoCA 2.0, there is only 1 IE per message */
+		num_ies = 1;
+	} else {
+		num_ies = data & 0xffff;
+	}
+
+	if (reply_offset) {
+		if (priv->mmp_20) {
+			/* In MoCA 2.0, the ACK is to simply set the
+			   MSB in the incoming message and send it
+			   back */
+			reply[0] = cpu_to_be32(data | 0x80000000);
+			rw = 1;
+		} else {
+			/* ACK + seq number + number of IEs */
+			reply[0] = cpu_to_be32((data & 0x00ff0000) |
+				0x04000000 | num_ies);
+			rw = 1;
+		}
+	}
+
+	err = -EINVAL;
+	w = 1;
+	max_size >>= 2;
+	while (num_ies) {
+		if (w >= max_size) {
+			msg = "dropping long message";
+			goto bad;
+		}
+
+		data = be32_to_cpu(m->data[w++]);
+
+		if (reply_offset && !priv->mmp_20) {
+			/*
+			 * ACK each IE in the original message;
+			 * return code is always 0
+			 */
+			if ((rw << 2) >= priv->core_resp_size)
+				printk(KERN_WARNING "%s: Core ack buffer "
+					"overflowed\n", __func__);
+			else {
+				reply[rw] = cpu_to_be32((data & ~0xffff) | 4);
+				rw++;
+				reply[rw] = cpu_to_be32(0);
+				rw++;
+			}
+		}
+		if (data & 3) {
+			printk(KERN_WARNING "ie=%08X\n", data);
+			msg = "IE is not a multiple of 4 bytes";
+			goto bad;
+		}
+
+		w += ((data & 0xffff) >> 2);
+
+		if (w > max_size) {
+			msg = "dropping long message";
+			goto bad;
+		}
+		num_ies--;
+	}
+	m->len = w << 2;
+
+	/* special case for lab_printf traps */
+	moca_handle_lab_printf(priv, m);
+
+	/*
+	 * Check to see if we can add this new message to the current queue.
+	 * The result will be a single message with multiple IEs.
+	 */
+	if (!priv->mmp_20) {
+		spin_lock_bh(&priv->list_lock);
+		if (!list_empty(&priv->core_msg_pend_list)) {
+			ml = priv->core_msg_pend_list.prev;
+			m = list_entry(ml, struct moca_core_msg, chain);
+
+			if (m->len + priv->core_msg_temp.len > max_size)
+				ml = NULL;
+			else {
+				u32 d0 = be32_to_cpu(
+						priv->core_msg_temp.data[0]);
+
+				/* Only concatenate traps from the core */
+				if (((be32_to_cpu(m->data[0]) & 0xff000000) !=
+					0x09000000) ||
+					((d0 & 0xff000000) != 0x09000000))
+					ml = NULL;
+				else {
+					/*
+					 * We can add the message to the
+					 * previous one. Update the num of IEs,
+					 * update the length and copy the data.
+					 */
+					data = be32_to_cpu(m->data[0]);
+					num_ies = data & 0xffff;
+					num_ies += d0 & 0xffff;
+					data &= 0xffff0000;
+					data |= num_ies;
+					m->data[0] = cpu_to_be32(data);
+
+					/*
+					 * Subtract 4 bytes from length for
+					   message header
+					 */
+					memcpy(&m->data[m->len >> 2],
+						&priv->core_msg_temp.data[1],
+						priv->core_msg_temp.len - 4);
+					m->len += priv->core_msg_temp.len - 4;
+					attach = 0;
+				}
+			}
+		}
+		spin_unlock_bh(&priv->list_lock);
+	}
+
+	if (ml == NULL) {
+		ml = moca_detach_head(priv, &priv->core_msg_free_list);
+		if (ml == NULL) {
+			msg = "no entries left on core_msg_free_list";
+			err = -ENOMEM;
+			goto bad;
+		}
+		m = list_entry(ml, struct moca_core_msg, chain);
+
+		memcpy(m->data, priv->core_msg_temp.data,
+			priv->core_msg_temp.len);
+		m->len = priv->core_msg_temp.len;
+	}
+
+	if (reply_offset) {
+		if ((cpuid == 1) &&
+			(moca_irq_status(priv, NO_FLUSH_IRQ) & M2H_ASSERT)) {
+			/* do not retry - message is gone forever */
+			err = 0;
+			msg = "core_req overwritten by assertion";
+			goto bad;
+		}
+		if ((cpuid == 0) &&
+			(moca_irq_status(priv, NO_FLUSH_IRQ)
+			& M2H_ASSERT_CPU0)) {
+			/* do not retry - message is gone forever */
+			err = 0;
+			msg = "core_req overwritten by assertion";
+			goto bad;
+		}
+
+		moca_write_mem(priv, reply_offset + priv->mbx_offset[cpuid],
+			reply, rw << 2);
+		moca_ringbell(priv, priv->regs->h2m_resp_bit[cpuid]);
+	}
+
+	if (attach) {
+		moca_attach_tail(priv, ml, &priv->core_msg_pend_list);
+		wake_up(&priv->core_msg_wq);
+	}
+
+	return 0;
+
+bad:
+	printk(KERN_WARNING "%s: %s\n", __func__, msg);
+
+	if (ml)
+		moca_attach_tail(priv, ml, &priv->core_msg_free_list);
+
+	return err;
+}
+
+static int moca_h2m_sanity_check(struct moca_priv_data *priv,
+	struct moca_host_msg *m)
+{
+	unsigned int w, num_ies;
+	u32 data;
+
+	if (priv->mmp_20) {
+		/* The length is stored in data[1]
+		   plus 8 extra header bytes */
+		data = be32_to_cpu(m->data[1]) + 8;
+		if (data > priv->host_req_size)
+			return -1;
+		else
+			return (int) data;
+	} else {
+		data = be32_to_cpu(m->data[0]);
+		num_ies = data & 0xffff;
+
+		w = 1;
+		while (num_ies) {
+			if (w >= (m->len << 2))
+				return -1;
+
+			data = be32_to_cpu(m->data[w++]);
+
+			if (data & 3)
+				return -1;
+			w += (data & 0xffff) >> 2;
+			num_ies--;
+		}
+		return w << 2;
+	}
+}
+
+
+/* Must have dev_mutex when calling this function */
+static int moca_sendmsg(struct moca_priv_data *priv, u32 cpuid)
+{
+	struct list_head *ml = NULL;
+	struct moca_host_msg *m;
+
+	if (priv->host_mbx_busy == 1)
+		return -1;
+
+	ml = moca_detach_head(priv, &priv->host_msg_pend_list);
+	if (ml == NULL)
+		return -EAGAIN;
+	m = list_entry(ml, struct moca_host_msg, chain);
+
+	moca_write_mem(priv, priv->mbx_offset[cpuid] + priv->host_req_offset,
+		m->data, m->len);
+
+	moca_ringbell(priv, priv->regs->h2m_req_bit[cpuid]);
+	moca_attach_tail(priv, ml, &priv->host_msg_free_list);
+	wake_up(&priv->host_msg_wq);
+
+	return 0;
+}
+
+/* Must have dev_mutex when calling this function */
+static int moca_wdt(struct moca_priv_data *priv, u32 cpu)
+{
+	struct list_head *ml = NULL;
+	struct moca_core_msg *m;
+
+	ml = moca_detach_head(priv, &priv->core_msg_free_list);
+	if (ml == NULL) {
+		printk(KERN_WARNING
+			"%s: no entries left on core_msg_free_list\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	if (priv->mmp_20) {
+		/*
+		 * generate phony wdt message to pass to the user
+		 * type = 0x03 (trap)
+		 * IE type = 0x11003 (wdt), 4 bytes length
+		 */
+		m = list_entry(ml, struct moca_core_msg, chain);
+		m->data[0] = cpu_to_be32(0x3);
+		m->data[1] = cpu_to_be32(4);
+		m->data[2] = cpu_to_be32(0x11003);
+		m->len = 12;
+	} else {
+		/*
+		 * generate phony wdt message to pass to the user
+		 * type = 0x09 (trap)
+		 * IE type = 0xff01 (wdt), 4 bytes length
+		 */
+		m = list_entry(ml, struct moca_core_msg, chain);
+		m->data[0] = cpu_to_be32(0x09000001);
+		m->data[1] = cpu_to_be32((MOCA_IE_WDT << 16) | 4);
+		m->data[2] = cpu_to_be32(cpu);
+		m->len = 12;
+	}
+
+	moca_attach_tail(priv, ml, &priv->core_msg_pend_list);
+	wake_up(&priv->core_msg_wq);
+
+	return 0;
+}
+
+static int moca_get_mbx_offset(struct moca_priv_data *priv)
+{
+	uintptr_t base;
+
+	if (priv->mbx_offset[1] == -1) {
+		if (moca_is_20(priv))
+			base = MOCA_RD(priv->base +
+				priv->regs->moca2host_mmp_inbox_0_offset) &
+				0x1fffffff;
+		else
+			base = MOCA_RD(priv->base + priv->regs->gp0_offset) &
+				0x1fffffff;
+
+		if ((base == 0) ||
+			(base >= priv->regs->cntl_mem_size +
+			 priv->regs->cntl_mem_offset) ||
+			(base & 0x07)) {
+			printk(KERN_WARNING "%s: can't get mailbox base CPU 1 (%X)\n",
+				__func__, (int)base);
+			return -1;
+		}
+		priv->mbx_offset[1] = base;
+	}
+
+	if ((priv->mbx_offset[0] == -1) &&
+		(moca_is_20(priv)) &&
+		(priv->mmp_20)) {
+		base = MOCA_RD(priv->base +
+			priv->regs->moca2host_mmp_inbox_2_offset) &
+			0x1fffffff;
+		if ((base == 0) ||
+			(base >= priv->regs->cntl_mem_size +
+			 priv->regs->cntl_mem_offset) ||
+			(base & 0x07)) {
+			printk(KERN_WARNING "%s: can't get mailbox base CPU 0 (%X)\n",
+				__func__, (int)base);
+			return -1;
+		}
+
+		priv->mbx_offset[0] = base;
+	}
+
+	return 0;
+}
+
+/*
+ * INTERRUPT / WORKQUEUE BH
+ */
+
+static void moca_work_handler(struct work_struct *work)
+{
+	struct moca_priv_data *priv =
+		container_of(work, struct moca_priv_data, work);
+	u32 mask = 0;
+	int ret, stopped = 0;
+
+	if (priv->enabled) {
+		mask = moca_irq_status(priv, FLUSH_IRQ);
+		if (mask & M2H_DMA) {
+			mask &= ~M2H_DMA;
+			complete(&priv->copy_complete);
+		}
+
+		if (mask & M2H_NEXTCHUNK) {
+			mask &= ~M2H_NEXTCHUNK;
+			complete(&priv->chunk_complete);
+		}
+
+		if (moca_is_20(priv) &&
+			(mask & M2H_NEXTCHUNK_CPU0)) {
+			mask &= ~M2H_NEXTCHUNK_CPU0;
+			complete(&priv->chunk_complete);
+		}
+
+		if (mask == 0) {
+			moca_enable_irq(priv);
+			return;
+		}
+
+		if (mask & (M2H_REQ | M2H_RESP |
+			M2H_REQ_CPU0 | M2H_RESP_CPU0)) {
+			if (moca_get_mbx_offset(priv)) {
+				/* mbx interrupt but mbx_offset is bogus?? */
+				moca_enable_irq(priv);
+				return;
+			}
+		}
+	}
+
+	mutex_lock(&priv->dev_mutex);
+
+	if (!priv->running) {
+		stopped = 1;
+	} else {
+		/* fatal events */
+		if (mask & M2H_ASSERT) {
+			ret = moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 1);
+			if (ret == -ENOMEM)
+				priv->assert_pending |= 2;
+		}
+		if (mask & M2H_ASSERT_CPU0) {
+			ret = moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 0);
+			if (ret == -ENOMEM)
+				priv->assert_pending |= 1;
+		}
+		/* M2H_WDT_CPU1 is mapped to the only CPU for MoCA11 HW */
+		if (mask & M2H_WDT_CPU1) {
+			ret = moca_wdt(priv, 2);
+			if (ret == -ENOMEM)
+				priv->wdt_pending |= (1 << 1);
+			stopped = 1;
+		}
+		if (moca_is_20(priv) &&
+			(mask & M2H_WDT_CPU0)) {
+			ret = moca_wdt(priv, 1);
+			if (ret == -ENOMEM)
+				priv->wdt_pending |= (1 << 0);
+			stopped = 1;
+		}
+	}
+	if (stopped) {
+		priv->running = 0;
+		priv->core_req_pending = 0;
+		priv->host_resp_pending = 0;
+		priv->host_mbx_busy = 1;
+		mutex_unlock(&priv->dev_mutex);
+		wake_up(&priv->core_msg_wq);
+		return;
+	}
+
+	/* normal events */
+	if (mask & M2H_REQ) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 1);
+		if (ret == -ENOMEM)
+			priv->core_req_pending |= 2;
+	}
+	if (mask & M2H_RESP) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 1);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending |= 2;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 1);
+		}
+	}
+
+	if (mask & M2H_REQ_CPU0) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 0);
+		if (ret == -ENOMEM)
+			priv->core_req_pending |= 1;
+	}
+	if (mask & M2H_RESP_CPU0) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 0);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending |= 1;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 0);
+		}
+	}
+	mutex_unlock(&priv->dev_mutex);
+
+	moca_enable_irq(priv);
+}
+
+static irqreturn_t moca_interrupt(int irq, void *arg)
+{
+	struct moca_priv_data *priv = arg;
+	struct moca_platform_data *pd;
+
+	if (!priv || !priv->enabled || !priv->pdev ||
+			!priv->pdev->dev.platform_data) {
+		printk(KERN_WARNING "moca_interrupt: can't go yet.\n");
+		/* only touch the L2 mask registers if the block is clocked */
+		if (priv && priv->enabled)
+			moca_disable_irq(priv);
+		return IRQ_HANDLED;
+	}
+
+#if MOCA6816
+	pd = (struct moca_platform_data *)priv->pdev->dev.platform_data;
+
+	/*
+	 * If the driver is for an external chip then the work function needs
+	 * to run, otherwise a few interrupts can be handled here
+	 */
+	if (0 == pd->use_spi) {
+#else
+	if (1) {
+#endif
+		/* Calling the "no_lock" version of this function. This is ok
+		   because no other function processes the DMA INT so there
+		   should be no contention issues. If more than the DMA
+		   INTs are checked here, the locking should be reconsidered. */
+		u32 mask = moca_irq_status_no_lock(priv, FLUSH_DMA_ONLY);
+
+		/* need to handle DMA completions ASAP */
+		if (mask & M2H_DMA) {
+			complete(&priv->copy_complete);
+			mask &= ~M2H_DMA;
+		}
+		if (mask & M2H_NEXTCHUNK) {
+			complete(&priv->chunk_complete);
+			mask &= ~M2H_NEXTCHUNK;
+		}
+
+		if (!mask)
+			return IRQ_HANDLED;
+	}
+	moca_disable_irq(priv);
+	schedule_work(&priv->work);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * BCM3450 ACCESS VIA I2C
+ */
+
+static int moca_3450_wait(struct moca_priv_data *priv)
+{
+	struct bsc_regs *bsc = priv->i2c_base;
+	long timeout = HZ / 1000; /* 1 ms */
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait);
+	int i = 0;
+
+	do {
+		if (I2C_RD(&bsc->iic_enable) & 2) {
+			I2C_WR(&bsc->iic_enable, 0);
+			return 0;
+		}
+		if (i++ > 50) {
+			I2C_WR(&bsc->iic_enable, 0);
+			printk(KERN_WARNING "%s: 3450 I2C timed out\n",
+				__func__);
+			return -1;
+		}
+		sleep_on_timeout(&wait, timeout ? timeout : 1);
+	} while (1);
+}
+
+static void moca_3450_write_i2c(struct moca_priv_data *priv, u8 addr, u32 data)
+{
+	struct bsc_regs *bsc = priv->i2c_base;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	I2C_WR(&bsc->iic_enable, 0);
+	I2C_WR(&bsc->chip_address, pd->bcm3450_i2c_addr << 1);
+	I2C_WR(&bsc->data_in[0], (addr >> 2) | (data << 8));
+	I2C_WR(&bsc->data_in[1], data >> 24);
+	I2C_WR(&bsc->cnt_reg, (5 << 0) | (0 << 6));	/* 5B out, 0B in */
+	I2C_WR(&bsc->ctl_reg, (1 << 4) | (0 << 0));	/* write only, 390kHz */
+	I2C_WR(&bsc->ctlhi_reg, (1 << 6));		/* 32-bit words */
+	I2C_WR(&bsc->iic_enable, 1);
+
+	moca_3450_wait(priv);
+}
+
+static u32 moca_3450_read_i2c(struct moca_priv_data *priv, u8 addr)
+{
+	struct bsc_regs *bsc = priv->i2c_base;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	I2C_WR(&bsc->iic_enable, 0);
+	I2C_WR(&bsc->chip_address, pd->bcm3450_i2c_addr << 1);
+	I2C_WR(&bsc->data_in[0], (addr >> 2));
+	I2C_WR(&bsc->cnt_reg, (1 << 0) | (4 << 6));	/* 1B out then 4B in */
+	I2C_WR(&bsc->ctl_reg, (1 << 4) | (3 << 0));	/* write/read, 390kHz */
+	I2C_WR(&bsc->ctlhi_reg, (1 << 6));		/* 32-bit words */
+	I2C_WR(&bsc->iic_enable, 1);
+
+	if (moca_3450_wait(priv) == 0)
+		return I2C_RD(&bsc->data_out[0]);
+	else
+		return 0xffffffff;
+}
+
+
+#define BCM3450_CHIP_ID		0x00
+#define BCM3450_CHIP_REV	0x04
+#define BCM3450_LNACNTL		0x14
+#define BCM3450_PACNTL		0x18
+#define BCM3450_MISC		0x1c
+
+static int moca_3450_get_reg(struct moca_priv_data *priv, unsigned int *arg)
+{
+	struct moca_xfer x;
+	u32 *dst;
+	u32 val;
+
+	if (!priv->i2c_base)
+		return -ENODEV;
+
+	if (copy_from_user(&x, (void __user *)arg, sizeof(x)))
+		return -EFAULT;
+
+	dst = (u32 *)(unsigned long)x.buf;
+
+	mutex_lock(&priv->moca_i2c_mutex);
+	val = moca_3450_read(priv, x.moca_addr);
+	mutex_unlock(&priv->moca_i2c_mutex);
+
+	if (put_user(val, dst))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int moca_3450_set_reg(struct moca_priv_data *priv, unsigned int *arg)
+{
+	struct moca_xfer x;
+	u32 val;
+
+	if (!priv->i2c_base)
+		return -ENODEV;
+
+	if (copy_from_user(&x, (void __user *)arg, sizeof(x)))
+		return -EFAULT;
+
+	mutex_lock(&priv->moca_i2c_mutex);
+	if (get_user(val, (u32 *)(unsigned long)x.buf))
+		return -EFAULT;
+
+	moca_3450_write(priv, x.moca_addr, val);
+	mutex_unlock(&priv->moca_i2c_mutex);
+
+	return 0;
+}
+
+static void moca_3450_init(struct moca_priv_data *priv, int action)
+{
+	u32 data;
+
+	mutex_lock(&priv->moca_i2c_mutex);
+
+	if (action == MOCA_ENABLE) {
+		/* reset the 3450's I2C block */
+		moca_3450_write(priv, BCM3450_MISC,
+			moca_3450_read(priv, BCM3450_MISC) | 1);
+
+		/* verify chip ID */
+		data = moca_3450_read(priv, BCM3450_CHIP_ID);
+		pr_info("bcm3450 chip id is: %08x\n", data);
+		if (data != 0x3450)
+			printk(KERN_WARNING "%s: invalid 3450 chip ID 0x%08x\n",
+				__func__, data);
+
+		/* reset the 3450's deserializer */
+		data = moca_3450_read(priv, BCM3450_MISC);
+		data &= ~0x8000; /* power on PA/LNA */
+		moca_3450_write(priv, BCM3450_MISC, data | 2);
+		moca_3450_write(priv, BCM3450_MISC, data & ~2);
+
+		/* enable the serial interface */
+		data = moca_3450_read(priv, BCM3450_MISC);
+		moca_3450_write(priv, BCM3450_MISC, data | (1 << 29));
+
+		/* set new PA gain */
+		data = moca_3450_read(priv, BCM3450_PACNTL);
+
+		moca_3450_write(priv, BCM3450_PACNTL, (data & ~0x02007ffd) |
+			(0x09 << 11) |		/* RDEG */
+			(0x38 << 5) |		/* CURR_CONT */
+			(0x05 << 2));		/* CURR_FOLLOWER */
+
+		/* Set LNACNTRL to default value */
+		moca_3450_write(priv, BCM3450_LNACNTL, 0x4924);
+
+	} else {
+
+		/* disable the serial interface */
+		data = moca_3450_read(priv, BCM3450_MISC);
+		moca_3450_write(priv, BCM3450_MISC, data & ~(1 << 29));
+
+		/* power down the PA/LNA */
+		data = moca_3450_read(priv, BCM3450_MISC);
+		moca_3450_write(priv, BCM3450_MISC, data | 0x8000);
+
+		data = moca_3450_read(priv, BCM3450_PACNTL);
+		moca_3450_write(priv, BCM3450_PACNTL, data |
+			(0x01 << 0) | /* PA_PWRDWN */
+			(0x01 << 25)); /* PA_SELECT_PWRUP_BSC */
+
+		data = moca_3450_read(priv, BCM3450_LNACNTL);
+		/* LNA_INBIAS=0, LNA_PWRUP_IIC=0: */
+		data &= ~((7<<12) | (1<<28));
+		/* LNA_SELECT_PWRUP_IIC=1: */
+		moca_3450_write(priv, BCM3450_LNACNTL, data | (1<<29));
+
+	}
+	mutex_unlock(&priv->moca_i2c_mutex);
+}
+
+/*
+ * FILE OPERATIONS
+ */
+
+static int moca_file_open(struct inode *inode, struct file *file)
+{
+	unsigned int minor = iminor(inode);
+	struct moca_priv_data *priv;
+
+	if ((minor >= NUM_MINORS) || minor_tbl[minor] == NULL)
+		return -ENODEV;
+
+	file->private_data = priv = minor_tbl[minor];
+
+	mutex_lock(&priv->dev_mutex);
+	priv->refcount++;
+	mutex_unlock(&priv->dev_mutex);
+	return 0;
+}
+
+static int moca_file_release(struct inode *inode, struct file *file)
+{
+	struct moca_priv_data *priv = file->private_data;
+
+	mutex_lock(&priv->dev_mutex);
+	priv->refcount--;
+	if (priv->refcount == 0 && priv->running == 1) {
+		/* last user closed the device */
+		moca_msg_reset(priv);
+		moca_hw_init(priv, MOCA_DISABLE);
+	}
+	mutex_unlock(&priv->dev_mutex);
+	return 0;
+}
+
+static int moca_ioctl_readmem(struct moca_priv_data *priv,
+	unsigned long xfer_uaddr)
+{
+	struct moca_xfer x;
+	uintptr_t i, src;
+	u32 *dst;
+
+	if (copy_from_user(&x, (void __user *)xfer_uaddr, sizeof(x)))
+		return -EFAULT;
+
+#if !DSL_MOCA
+	if (moca_range_ok(priv, x.moca_addr, x.len) < 0)
+		return -EINVAL;
+#endif
+
+	src = (uintptr_t)priv->base + x.moca_addr;
+	dst = (void *)(unsigned long)x.buf;
+
+	for (i = 0; i < x.len; i += 4, src += 4, dst++)
+		if (put_user(cpu_to_be32(MOCA_RD(src)), dst))
+			return -EFAULT;
+
+	return 0;
+}
+
+
+static int moca_ioctl_writemem(struct moca_priv_data *priv,
+	unsigned long xfer_uaddr)
+{
+	struct moca_xfer x;
+	uintptr_t i, dst;
+	u32 *src;
+
+	if (copy_from_user(&x, (void __user *)xfer_uaddr, sizeof(x)))
+		return -EFAULT;
+
+#if !DSL_MOCA
+	if (moca_range_ok(priv, x.moca_addr, x.len) < 0)
+		return -EINVAL;
+#endif
+
+	dst = (uintptr_t)priv->base + x.moca_addr;
+	src = (void *)(unsigned long)x.buf;
+
+	for (i = 0; i < x.len; i += 4, src++, dst += 4) {
+		unsigned int word;	/* don't shadow struct moca_xfer x */
+
+		if (get_user(word, src))
+			return -EFAULT;
+
+		MOCA_WR(dst, cpu_to_be32(word));
+	}
+
+	return 0;
+}
+
+#if !MOCA6816
+static unsigned int moca_get_phy_freq(struct moca_priv_data *priv)
+{
+	return priv->phy_freq;
+}
+#endif
+
+/* legacy ioctl - DEPRECATED */
+static int moca_ioctl_get_drv_info_v2(struct moca_priv_data *priv,
+	unsigned long arg)
+{
+	struct moca_kdrv_info_v2 info;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	memset(&info, 0, sizeof(info));
+	info.version = DRV_VERSION;
+	info.build_number = DRV_BUILD_NUMBER;
+	info.builtin_fw = !!bmoca_fw_image;
+
+	info.uptime = (jiffies - priv->start_time) / HZ;
+	info.refcount = priv->refcount;
+	if (moca_is_20(priv))
+		info.gp1 = priv->running ? MOCA_RD(priv->base +
+			priv->regs->moca2host_mmp_inbox_1_offset) : 0;
+	else
+		info.gp1 = priv->running ?
+			MOCA_RD(priv->base + priv->regs->gp1_offset) : 0;
+
+	memcpy(info.enet_name, pd->enet_name, MOCA_IFNAMSIZ);
+
+	info.enet_id = pd->enet_id;
+	info.macaddr_hi = pd->macaddr_hi;
+	info.macaddr_lo = pd->macaddr_lo;
+	info.hw_rev = pd->chip_id;
+	info.rf_band = pd->rf_band;
+
+	if (copy_to_user((void *)arg, &info, sizeof(info)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int moca_ioctl_get_drv_info(struct moca_priv_data *priv,
+	unsigned long arg)
+{
+	struct moca_kdrv_info info;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	memset(&info, 0, sizeof(info));
+	info.version = DRV_VERSION;
+	info.build_number = DRV_BUILD_NUMBER;
+	info.builtin_fw = !!bmoca_fw_image;
+
+	info.uptime = (jiffies - priv->start_time) / HZ;
+	info.refcount = priv->refcount;
+	if (moca_is_20(priv))
+		info.gp1 = priv->running ? MOCA_RD(priv->base +
+			priv->regs->moca2host_mmp_inbox_1_offset) : 0;
+	else
+		info.gp1 = priv->running ?
+			MOCA_RD(priv->base + priv->regs->gp1_offset) : 0;
+
+	info.phy_freq = moca_get_phy_freq(priv);
+
+#if MOCA6816
+	info.device_id = (((struct moca_platform_data *)
+		priv->pdev->dev.platform_data)->devId);
+	moca_read_mac_addr(priv, &pd->macaddr_hi,
+		&pd->macaddr_lo);
+#endif
+
+	memcpy(info.enet_name, pd->enet_name, MOCA_IFNAMSIZ);
+
+	info.enet_id = pd->enet_id;
+	info.macaddr_hi = pd->macaddr_hi;
+	info.macaddr_lo = pd->macaddr_lo;
+	info.chip_id = pd->chip_id;
+	info.hw_rev = pd->hw_rev;
+	info.rf_band = pd->rf_band;
+
+	if (copy_to_user((void *)arg, &info, sizeof(info)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int moca_ioctl_check_for_data(struct moca_priv_data *priv,
+	unsigned long arg)
+{
+	int data_avail = 0;
+	int ret;
+	u32 mask;
+
+	moca_disable_irq(priv);
+
+	if (moca_get_mbx_offset(priv)) {
+		moca_enable_irq(priv);
+		return -EIO;
+	}
+
+	/* If an IRQ is pending, process it here rather than waiting for it to
+	   ensure the results are ready. Clear the ones we are currently
+	   processing */
+	mask = moca_irq_status(priv, FLUSH_REQRESP_ONLY);
+
+	if (mask & M2H_REQ) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 1);
+		if (ret == -ENOMEM)
+			priv->core_req_pending |= 2;
+	}
+	if (mask & M2H_RESP) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 1);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending |= 2;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 1);
+		}
+	}
+
+	if (mask & M2H_REQ_CPU0) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 0);
+		if (ret == -ENOMEM)
+			priv->core_req_pending |= 1;
+	}
+	if (mask & M2H_RESP_CPU0) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 0);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending |= 1;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 0);
+		}
+	}
+
+	moca_enable_irq(priv);
+
+	spin_lock_bh(&priv->list_lock);
+	data_avail = !list_empty(&priv->core_msg_pend_list);
+	spin_unlock_bh(&priv->list_lock);
+
+	if (copy_to_user((void *)arg, &data_avail, sizeof(data_avail)))
+		return -EFAULT;
+
+	return 0;
+}
+
+
+static long moca_file_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	struct moca_priv_data *priv = file->private_data;
+	struct moca_start	  start;
+	long ret = -ENOTTY;
+#if MOCA6816
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+#endif
+
+	mutex_lock(&priv->dev_mutex);
+
+	switch (cmd) {
+	case MOCA_IOCTL_START:
+		ret = 0;
+
+#if MOCA6816
+		/*
+		 * When MoCA is configured as WAN interface it will
+		 * get a new MAC address
+		 */
+		moca_read_mac_addr(priv, &pd->macaddr_hi,
+			&pd->macaddr_lo);
+#endif
+
+		clk_set_rate(priv->phy_clk, DEFAULT_PHY_CLOCK);
+
+		if (copy_from_user(&start, (void __user *)arg, sizeof(start)))
+			ret = -EFAULT;
+
+		if (ret >= 0) {
+			priv->bonded_mode =
+				(start.boot_flags & MOCA_BOOT_FLAGS_BONDED);
+
+			if (!priv->enabled) {
+				moca_msg_reset(priv);
+				moca_hw_init(priv, MOCA_ENABLE);
+				moca_3450_init(priv, MOCA_ENABLE);
+				moca_irq_status(priv, FLUSH_IRQ);
+				moca_mmp_init(priv, 0);
+			}
+
+			ret = moca_write_img(priv, &start.x);
+			if (ret >= 0)
+				priv->running = 1;
+		}
+		break;
+	case MOCA_IOCTL_STOP:
+		moca_msg_reset(priv);
+		moca_3450_init(priv, MOCA_DISABLE);
+		moca_hw_init(priv, MOCA_DISABLE);
+		ret = 0;
+		break;
+	case MOCA_IOCTL_READMEM:
+		ret = moca_ioctl_readmem(priv, arg);
+		break;
+	case MOCA_IOCTL_WRITEMEM:
+		ret = moca_ioctl_writemem(priv, arg);
+		break;
+	case MOCA_IOCTL_GET_DRV_INFO_V2:
+		ret = moca_ioctl_get_drv_info_v2(priv, arg);
+		break;
+	case MOCA_IOCTL_GET_DRV_INFO:
+		ret = moca_ioctl_get_drv_info(priv, arg);
+		break;
+	case MOCA_IOCTL_CHECK_FOR_DATA:
+		if (priv->running)
+			ret = moca_ioctl_check_for_data(priv, arg);
+		else
+			ret = -EIO;
+		break;
+	case MOCA_IOCTL_WOL:
+		priv->wol_enabled = (int)arg;
+		dev_info(priv->dev, "WOL is %s\n",
+			priv->wol_enabled ? "enabled" : "disabled");
+		ret = 0;
+		break;
+	case MOCA_IOCTL_SET_CPU_RATE:
+		if (!priv->cpu_clk)
+			ret = -EIO;
+		else
+			ret = clk_set_rate(priv->cpu_clk, (unsigned int)arg);
+		break;
+	case MOCA_IOCTL_SET_PHY_RATE:
+		if (!priv->phy_clk)
+			ret = -EIO;
+		else
+			ret = clk_set_rate(priv->phy_clk, (unsigned int)arg);
+		break;
+	case MOCA_IOCTL_GET_3450_REG:
+		ret = moca_3450_get_reg(priv, (unsigned int *)arg);
+		break;
+	case MOCA_IOCTL_SET_3450_REG:
+		ret = moca_3450_set_reg(priv, (unsigned int *)arg);
+		break;
+	default:
+		pr_warn("moca_ioctl: unrecognized cmd\n");
+		break;
+	}
+	mutex_unlock(&priv->dev_mutex);
+
+	return ret;
+}
+
+static ssize_t moca_file_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct moca_priv_data *priv = file->private_data;
+	DECLARE_WAITQUEUE(wait, current);
+	struct list_head *ml = NULL;
+	struct moca_core_msg *m = NULL;
+	ssize_t ret;
+	int empty_free_list = 0;
+
+	if (count < priv->core_req_size)
+		return -EINVAL;
+
+	add_wait_queue(&priv->core_msg_wq, &wait);
+	do {
+		__set_current_state(TASK_INTERRUPTIBLE);
+
+		ml = moca_detach_head(priv, &priv->core_msg_pend_list);
+		if (ml != NULL) {
+			m = list_entry(ml, struct moca_core_msg, chain);
+			ret = 0;
+			break;
+		}
+		if (file->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
+			break;
+		}
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		schedule();
+	} while (1);
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&priv->core_msg_wq, &wait);
+
+	if (ret < 0)
+		return ret;
+
+	if (copy_to_user(buf, m->data, m->len))
+		ret = -EFAULT;	/* beware: message will be dropped */
+	else
+		ret = m->len;
+
+	spin_lock_bh(&priv->list_lock);
+	if (list_empty(&priv->core_msg_free_list))
+		empty_free_list = 1;
+	list_add_tail(ml, &priv->core_msg_free_list);
+	spin_unlock_bh(&priv->list_lock);
+
+	if (empty_free_list) {
+		/*
+		 * we just freed up space for another message, so if there was
+		 * a backlog, clear it out
+		 */
+		mutex_lock(&priv->dev_mutex);
+
+		if (moca_get_mbx_offset(priv)) {
+			mutex_unlock(&priv->dev_mutex);
+			return -EIO;
+		}
+
+		if (priv->assert_pending & 2) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 1) != -ENOMEM)
+				priv->assert_pending &= ~2;
+			else
+				printk(KERN_WARNING "%s: moca_recvmsg "
+					"assert failed\n", __func__);
+		}
+		if (priv->assert_pending & 1) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 0) != -ENOMEM)
+				priv->assert_pending &= ~1;
+			else
+				printk(KERN_WARNING "%s: moca_recvmsg "
+					"assert failed\n", __func__);
+		}
+		if (priv->wdt_pending)
+			if (moca_wdt(priv, priv->wdt_pending) != -ENOMEM)
+				priv->wdt_pending = 0;
+
+		if (priv->core_req_pending & 1) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, priv->core_resp_offset, 0)
+				!= -ENOMEM)
+				priv->core_req_pending &= ~1;
+			else
+				printk(KERN_WARNING "%s: moca_recvmsg "
+					"core_req failed\n", __func__);
+		}
+		if (priv->core_req_pending & 2) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, priv->core_resp_offset, 1)
+				!= -ENOMEM)
+				priv->core_req_pending &= ~2;
+			else
+				printk(KERN_WARNING "%s: moca_recvmsg "
+					"core_req failed\n", __func__);
+		}
+		if (priv->host_resp_pending & 1) {
+			if (moca_recvmsg(priv, priv->host_resp_offset,
+				priv->host_resp_size, 0, 0) != -ENOMEM)
+				priv->host_resp_pending &= ~1;
+			else
+				printk(KERN_WARNING "%s: moca_recvmsg "
+					"host_resp failed\n", __func__);
+		}
+		if (priv->host_resp_pending & 2) {
+			if (moca_recvmsg(priv, priv->host_resp_offset,
+				priv->host_resp_size, 0, 1) != -ENOMEM)
+				priv->host_resp_pending &= ~2;
+			else
+				printk(KERN_WARNING "%s: moca_recvmsg "
+					"host_resp failed\n", __func__);
+		}
+		mutex_unlock(&priv->dev_mutex);
+	}
+
+	return ret;
+}
+
+static ssize_t moca_file_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct moca_priv_data *priv = file->private_data;
+	DECLARE_WAITQUEUE(wait, current);
+	struct list_head *ml = NULL;
+	struct moca_host_msg *m = NULL;
+	ssize_t ret;
+	u32 cpuid;
+
+	if (count > priv->host_req_size)
+		return -EINVAL;
+
+	add_wait_queue(&priv->host_msg_wq, &wait);
+	do {
+		__set_current_state(TASK_INTERRUPTIBLE);
+
+		ml = moca_detach_head(priv, &priv->host_msg_free_list);
+		if (ml != NULL) {
+			m = list_entry(ml, struct moca_host_msg, chain);
+			ret = 0;
+			break;
+		}
+		if (file->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
+			break;
+		}
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		schedule();
+	} while (1);
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&priv->host_msg_wq, &wait);
+
+	if (ret < 0)
+		return ret;
+
+	m->len = count;
+
+	if (copy_from_user(m->data, buf, m->len)) {
+		ret = -EFAULT;
+		goto bad;
+	}
+
+	ret = moca_h2m_sanity_check(priv, m);
+	if (ret < 0) {
+		ret = -EINVAL;
+		goto bad;
+	}
+
+	moca_attach_tail(priv, ml, &priv->host_msg_pend_list);
+
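+	/* MMP 2.0: the 0x10 flag in the first word routes the message to CPU 0 */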
+	if (!priv->mmp_20)
+		cpuid = 1;
+	else {
+		if (cpu_to_be32(m->data[0]) & 0x10)
+			cpuid = 0;
+		else
+			cpuid = 1;
+	}
+	mutex_lock(&priv->dev_mutex);
+	if (priv->running) {
+		if (moca_get_mbx_offset(priv))
+			ret = -EIO;
+		else
+			moca_sendmsg(priv, cpuid);
+	} else
+		ret = -EIO;
+	mutex_unlock(&priv->dev_mutex);
+
+	return ret;
+
+bad:
+	moca_attach_tail(priv, ml, &priv->host_msg_free_list);
+
+	return ret;
+}
+
+static unsigned int moca_file_poll(struct file *file, poll_table *wait)
+{
+	struct moca_priv_data *priv = file->private_data;
+	unsigned int ret = 0;
+
+	poll_wait(file, &priv->core_msg_wq, wait);
+	poll_wait(file, &priv->host_msg_wq, wait);
+
+	spin_lock_bh(&priv->list_lock);
+	if (!list_empty(&priv->core_msg_pend_list))
+		ret |= POLLIN | POLLRDNORM;
+	if (!list_empty(&priv->host_msg_free_list))
+		ret |= POLLOUT | POLLWRNORM;
+	spin_unlock_bh(&priv->list_lock);
+
+	return ret;
+}
+
+static const struct file_operations moca_fops = {
+	.owner =		THIS_MODULE,
+	.open =			moca_file_open,
+	.release =		moca_file_release,
+	.unlocked_ioctl =	moca_file_ioctl,
+	.read =			moca_file_read,
+	.write =		moca_file_write,
+	.poll =			moca_file_poll,
+};
+
+/*
+ * PLATFORM DRIVER
+ */
+
+static int moca_probe(struct platform_device *pdev)
+{
+	struct moca_priv_data *priv;
+	struct resource *mres, *ires = NULL;
+	int minor, err = 0;
+	struct moca_platform_data *pd = pdev->dev.platform_data;
+
+	if (pd->use_spi && !pd->spi) {
+		pr_err("moca: use_spi=1, but no bmoca SPI device found.\n");
+		return -EINVAL;
+	}
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		printk(KERN_ERR "%s: out of memory\n", __func__);
+		return -ENOMEM;
+	}
+	dev_set_drvdata(&pdev->dev, priv);
+	priv->pdev = pdev;
+	priv->start_time = jiffies;
+
+	priv->clk = clk_get(&pdev->dev, "moca");
+
+	priv->cpu_clk = clk_get(&pdev->dev, "moca-cpu");
+	priv->phy_clk = clk_get(&pdev->dev, "moca-phy");
+
+	priv->hw_rev = pd->hw_rev;
+
+#if MOCA6816
+	if ((pd->hw_rev == HWREV_MOCA_20_ALT) ||
+	    (pd->hw_rev == HWREV_MOCA_20_GEN21) ||
+	    (pd->hw_rev == HWREV_MOCA_20_GEN22) ||
+	    (pd->hw_rev == HWREV_MOCA_20_GEN23))
+		priv->regs = &regs_6802;
+	else
+		priv->regs = &regs_6816;
+#else
+	if (pd->hw_rev == HWREV_MOCA_11_PLUS)
+		priv->regs = &regs_11_plus;
+	else if (pd->hw_rev == HWREV_MOCA_11_LITE)
+		priv->regs = &regs_11_lite;
+	else if (pd->hw_rev == HWREV_MOCA_11)
+		priv->regs = &regs_11;
+	else if ((pd->hw_rev == HWREV_MOCA_20_ALT) ||
+		(pd->hw_rev == HWREV_MOCA_20_GEN21) ||
+		(pd->hw_rev == HWREV_MOCA_20_GEN22))
+		priv->regs = &regs_20;
+	else
+		priv->regs = &regs_11_plus;
+#endif
+
+	init_waitqueue_head(&priv->host_msg_wq);
+	init_waitqueue_head(&priv->core_msg_wq);
+	init_completion(&priv->copy_complete);
+	init_completion(&priv->chunk_complete);
+
+	spin_lock_init(&priv->list_lock);
+	spin_lock_init(&priv->clock_lock);
+	mutex_init(&priv->irq_status_mutex);
+	mutex_init(&priv->dev_mutex);
+	mutex_init(&priv->copy_mutex);
+	mutex_init(&priv->moca_i2c_mutex);
+
+	sg_init_table(priv->fw_sg, MAX_FW_PAGES);
+
+	INIT_WORK(&priv->work, moca_work_handler);
+
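+	/* find the first unused slot in the minor table */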
+	priv->minor = -1;
+	for (minor = 0; minor < NUM_MINORS; minor++) {
+		if (minor_tbl[minor] == NULL) {
+			priv->minor = minor;
+			break;
+		}
+	}
+
+	if (priv->minor == -1) {
+		printk(KERN_ERR "%s: can't allocate minor device\n",
+			__func__);
+		err = -EIO;
+		goto bad;
+	}
+
+	mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+	if (!mres || !ires) {
+		printk(KERN_ERR "%s: can't get resources\n", __func__);
+		err = -EIO;
+		goto bad;
+	}
+
+#if defined(CONFIG_BCM_6802_MoCA)
+	priv->base = (void *)mres->start;
+	priv->irq = pd->spi->irq;
+	priv->i2c_base = (void *)pd->bcm3450_i2c_base;
+#else
+	priv->base = ioremap(mres->start, mres->end - mres->start + 1);
+	priv->irq = ires->start;
+	priv->i2c_base = ioremap(pd->bcm3450_i2c_base, sizeof(struct bsc_regs));
+#endif
+
+#if MOCA6816
+	moca_read_mac_addr(priv, &pd->macaddr_hi, &pd->macaddr_lo);
+#endif
+	if (hw_specific_init(priv))
+		goto bad;
+
+	/* leave core in reset until we get an ioctl */
+	//moca_hw_reset(priv); // moca_hw_init(MOCA_ENABLE) does this anyway
+
+	moca_hw_init(priv, MOCA_ENABLE);
+	moca_disable_irq(priv);
+	moca_msg_reset(priv);
+	moca_hw_init(priv, MOCA_DISABLE);
+
+	printk(KERN_INFO "bmoca: adding minor #%d at base 0x%08llx, IRQ %d, "
+		"I2C 0x%08llx/0x%02x\n", priv->minor,
+		(unsigned long long)mres->start, priv->irq,
+		(unsigned long long)pd->bcm3450_i2c_base, pd->bcm3450_i2c_addr);
+
+	minor_tbl[priv->minor] = priv;
+	priv->dev = device_create(moca_class, NULL,
+		MKDEV(MOCA_MAJOR, priv->minor), NULL, "bmoca%d", priv->minor);
+	if (IS_ERR(priv->dev)) {
+		printk(KERN_WARNING "bmoca: can't register class device\n");
+		priv->dev = NULL;
+	}
+
+	moca_enable_irq(priv);
+	if (request_irq(priv->irq, moca_interrupt, 0, "moca", priv) < 0) {
+		printk(KERN_WARNING  "%s: can't request interrupt\n",
+			__func__);
+		err = -EIO;
+		goto bad2;
+	}
+
+	if (err)
+		goto bad2;
+
+	return 0;
+
+bad2:
+	if (!pd->use_spi) {
+		if (priv->base)
+			iounmap(priv->base);
+		if (priv->i2c_base)
+			iounmap(priv->i2c_base);
+	}
+bad:
+	kfree(priv);
+	return err;
+}
+
+static int moca_remove(struct platform_device *pdev)
+{
+	struct moca_priv_data *priv = dev_get_drvdata(&pdev->dev);
+	struct moca_platform_data *pd = pdev->dev.platform_data;
+	struct clk *clk = priv->clk;
+	struct clk *phy_clk = priv->phy_clk;
+	struct clk *cpu_clk = priv->cpu_clk;
+
+	if (priv->dev)
+		device_destroy(moca_class, MKDEV(MOCA_MAJOR, priv->minor));
+	minor_tbl[priv->minor] = NULL;
+
+	/* free irq if it is used (not used on 6802) */
+	if (priv->irq)
+		free_irq(priv->irq, priv);
+
+	if (!pd->use_spi) {
+		iounmap(priv->i2c_base);
+		iounmap(priv->base);
+	}
+	kfree(priv);
+
+	clk_put(clk);
+	clk_put(phy_clk);
+	clk_put(cpu_clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int moca_suspend(struct device *dev)
+{
+	/*
+	 * Do not do anything on suspend; the MoCA core is not
+	 * necessarily stopped.
+	 */
+	return 0;
+}
+
+static int moca_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops moca_pm_ops = {
+	.suspend		= moca_suspend,
+	.resume			= moca_resume,
+};
+
+#endif
+
+static struct platform_driver moca_plat_drv = {
+	.probe =		moca_probe,
+	.remove =		moca_remove,
+	.driver = {
+		.name =		"bmoca",
+		.owner =	THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm =		&moca_pm_ops,
+#endif
+	},
+};
+
+static int moca_init(void)
+{
+	int ret;
+	memset(minor_tbl, 0, sizeof(minor_tbl));
+	ret = register_chrdev(MOCA_MAJOR, MOCA_CLASS, &moca_fops);
+	if (ret < 0) {
+		printk(KERN_ERR "bmoca: can't register major %d\n", MOCA_MAJOR);
+		goto bad;
+	}
+
+	moca_class = class_create(THIS_MODULE, MOCA_CLASS);
+	if (IS_ERR(moca_class)) {
+		printk(KERN_ERR "bmoca: can't create device class\n");
+		ret = PTR_ERR(moca_class);
+		goto bad2;
+	}
+
+#if MOCA6816
+	ret = moca_platform_dev_register();
+
+	if (ret < 0) {
+		printk(KERN_ERR "bmoca: can't register platform_device\n");
+		goto bad3;
+	}
+#endif
+
+	ret = platform_driver_register(&moca_plat_drv);
+	if (ret < 0) {
+		printk(KERN_ERR "bmoca: can't register platform_driver\n");
+		goto bad3;
+	}
+
+	return 0;
+
+bad3:
+#if MOCA6816
+	moca_platform_dev_unregister();
+#endif
+	class_destroy(moca_class);
+bad2:
+	unregister_chrdev(MOCA_MAJOR, MOCA_CLASS);
+bad:
+	return ret;
+}
+
+static void moca_exit(void)
+{
+	unregister_chrdev(MOCA_MAJOR, MOCA_CLASS);
+	platform_driver_unregister(&moca_plat_drv);
+#if MOCA6816
+	moca_platform_dev_unregister();
+#endif
+	class_destroy(moca_class);
+}
+
+module_init(moca_init);
+module_exit(moca_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("MoCA messaging driver");
diff --git a/3.8/bmoca.c b/3.8/bmoca.c
new file mode 100644
index 0000000..6ca337e
--- /dev/null
+++ b/3.8/bmoca.c
@@ -0,0 +1,2526 @@
+/*
+ * Copyright (C) 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt)            KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <linux/scatterlist.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/clk-brcmstb.h>
+#include <linux/netdevice.h>
+
+#define DRV_VERSION		0x00040000
+#define DRV_BUILD_NUMBER	0x20110831
+
+#if defined(CONFIG_BRCMSTB)
+#define MOCA6816		0
+#include <linux/bmoca.h>
+#elif defined(DSL_MOCA)
+#define MOCA6816		1
+#include "bmoca.h"
+#include <boardparms.h>
+#include <bcm3450.h>
+#include <linux/netdevice.h>
+#else
+#define MOCA6816		1
+#include <linux/bmoca.h>
+#endif
+
+#if defined(CONFIG_BRCMSTB)
+#include <linux/brcmstb/brcmstb.h>
+#endif
+
+#define MOCA_ENABLE		1
+#define MOCA_DISABLE		0
+
+#define OFF_PKT_REINIT_MEM	0x00a08000
+#define PKT_REINIT_MEM_SIZE	(32 * 1024)
+#define PKT_REINIT_MEM_END	(OFF_PKT_REINIT_MEM  + PKT_REINIT_MEM_SIZE)
+
+/* The mailbox layout is different for MoCA 2.0 compared to
+   MoCA 1.1 */
+
+/* MoCA 1.1 mailbox layout */
+#define HOST_REQ_SIZE_11        304
+#define HOST_RESP_SIZE_11       256
+#define CORE_REQ_SIZE_11        400
+#define CORE_RESP_SIZE_11       64
+
+/* MoCA 1.1 offsets from the mailbox pointer */
+#define HOST_REQ_OFFSET_11      0
+#define HOST_RESP_OFFSET_11     (HOST_REQ_OFFSET_11 + HOST_REQ_SIZE_11)
+#define CORE_REQ_OFFSET_11      (HOST_RESP_OFFSET_11 + HOST_RESP_SIZE_11)
+#define CORE_RESP_OFFSET_11     (CORE_REQ_OFFSET_11 + CORE_REQ_SIZE_11)
+
+/* MoCA 2.0 mailbox layout */
+#define HOST_REQ_SIZE_20        512
+#define HOST_RESP_SIZE_20       512
+#define CORE_REQ_SIZE_20        512
+#define CORE_RESP_SIZE_20       512
+
+/* MoCA 2.0 offsets from the mailbox pointer */
+#define HOST_REQ_OFFSET_20      0
+#define HOST_RESP_OFFSET_20     (HOST_REQ_OFFSET_20 + 0)
+#define CORE_REQ_OFFSET_20      (HOST_RESP_OFFSET_20 + HOST_RESP_SIZE_20)
+#define CORE_RESP_OFFSET_20     (CORE_REQ_OFFSET_20 + 0)
+
+#define HOST_REQ_SIZE_MAX       HOST_REQ_SIZE_20
+#define CORE_REQ_SIZE_MAX       CORE_REQ_SIZE_20
+#define CORE_RESP_SIZE_MAX      CORE_RESP_SIZE_20
+
+/* local H2M, M2H buffers */
+#define NUM_CORE_MSG		32
+#define NUM_HOST_MSG		8
+
+#define FW_CHUNK_SIZE		4096
+#define MAX_BL_CHUNKS		8
+#define MAX_FW_SIZE		(1024 * 1024)
+#define MAX_FW_PAGES		((MAX_FW_SIZE >> PAGE_SHIFT) + 1)
+#define MAX_LAB_PRINTF		104
+
+#ifdef __LITTLE_ENDIAN
+#define M2M_WRITE		(BIT(31) | BIT(27) | BIT(28))
+#define M2M_READ		(BIT(30) | BIT(27) | BIT(28))
+#else
+#define M2M_WRITE		(BIT(31) | BIT(27))
+#define M2M_READ		(BIT(30) | BIT(27))
+#endif
+
+#define M2M_TIMEOUT_MS		10
+
+#define NO_FLUSH_IRQ		0
+#define FLUSH_IRQ		1
+#define FLUSH_DMA_ONLY		2
+#define FLUSH_REQRESP_ONLY	3
+
+#define DEFAULT_PHY_CLOCK	(300 * 1000000)
+
+/* DMA buffers may not share a cache line with anything else */
+#define __DMA_ALIGN__		__aligned(L1_CACHE_BYTES)
+
+struct moca_host_msg {
+	u32			data[HOST_REQ_SIZE_MAX / 4] __DMA_ALIGN__;
+	struct list_head	chain __DMA_ALIGN__;
+	u32			len;
+};
+
+struct moca_core_msg {
+	u32			data[CORE_REQ_SIZE_MAX / 4] __DMA_ALIGN__;
+	struct list_head	chain __DMA_ALIGN__;
+	u32			len;
+};
+
+struct moca_regs {
+	unsigned int		data_mem_offset;
+	unsigned int		data_mem_size;
+	unsigned int		cntl_mem_size;
+	unsigned int		cntl_mem_offset;
+	unsigned int		gp0_offset;
+	unsigned int		gp1_offset;
+	unsigned int		ringbell_offset;
+	unsigned int		l2_status_offset;
+	unsigned int		l2_clear_offset;
+	unsigned int		l2_mask_set_offset;
+	unsigned int		l2_mask_clear_offset;
+	unsigned int		sw_reset_offset;
+	unsigned int		led_ctrl_offset;
+	unsigned int		m2m_src_offset;
+	unsigned int		m2m_dst_offset;
+	unsigned int		m2m_cmd_offset;
+	unsigned int		m2m_status_offset;
+	unsigned int		moca2host_mmp_inbox_0_offset;
+	unsigned int		moca2host_mmp_inbox_1_offset;
+	unsigned int		moca2host_mmp_inbox_2_offset;
+	unsigned int		h2m_resp_bit[2]; /* indexed by cpu */
+	unsigned int		h2m_req_bit[2]; /* indexed by cpu */
+	unsigned int		sideband_gmii_fc_offset;
+};
+
+struct moca_priv_data {
+	struct platform_device	*pdev;
+	struct device		*dev;
+
+	unsigned int		minor;
+	int			irq;
+	struct work_struct	work;
+	void __iomem		*base;
+	void __iomem		*i2c_base;
+	struct platform_device	*enet_pdev;
+
+	unsigned int		mbx_offset[2]; /* indexed by MoCA cpu */
+	struct page		*fw_pages[MAX_FW_PAGES];
+	struct scatterlist	fw_sg[MAX_FW_PAGES];
+	struct completion	copy_complete;
+	struct completion	chunk_complete;
+
+	struct list_head	host_msg_free_list;
+	struct list_head	host_msg_pend_list;
+	struct moca_host_msg	host_msg_queue[NUM_HOST_MSG] __DMA_ALIGN__;
+	wait_queue_head_t	host_msg_wq;
+
+	struct list_head	core_msg_free_list;
+	struct list_head	core_msg_pend_list;
+	u32		core_resp_buf[CORE_RESP_SIZE_MAX / 4] __DMA_ALIGN__;
+	struct moca_core_msg	core_msg_queue[NUM_CORE_MSG] __DMA_ALIGN__;
+	struct moca_core_msg	core_msg_temp __DMA_ALIGN__;
+	wait_queue_head_t	core_msg_wq;
+
+	spinlock_t		list_lock;
+	spinlock_t		clock_lock;
+	spinlock_t		irq_status_lock;
+	struct mutex		dev_mutex;
+	struct mutex		copy_mutex;
+	struct mutex		moca_i2c_mutex;
+	int			host_mbx_busy;
+	int			host_resp_pending;
+	int			core_req_pending;
+	int			assert_pending;
+	int			wdt_pending;
+
+	int			enabled;
+	int			running;
+	int			wol_enabled;
+	struct clk		*clk;
+	struct clk		*phy_clk;
+	struct clk		*cpu_clk;
+
+	int			refcount;
+	unsigned long		start_time;
+	dma_addr_t		tpcap_buf_phys;
+
+	unsigned int		bonded_mode;
+	unsigned int		phy_freq;
+
+	unsigned int		hw_rev;
+
+	const struct moca_regs	*regs;
+
+	/* MMP Parameters */
+	unsigned int		mmp_20;
+	unsigned int		host_req_size;
+	unsigned int		host_resp_size;
+	unsigned int		core_req_size;
+	unsigned int		core_resp_size;
+	unsigned int		host_req_offset;
+	unsigned int		host_resp_offset;
+	unsigned int		core_req_offset;
+	unsigned int		core_resp_offset;
+};
+
+static const struct moca_regs regs_11_plus = {
+	.data_mem_offset		= 0,
+	.data_mem_size			= (256 * 1024),
+	.cntl_mem_offset		= 0x00040000,
+	.cntl_mem_size			= (128 * 1024),
+	.gp0_offset			= 0x000a2050,
+	.gp1_offset			= 0x000a2054,
+	.ringbell_offset		= 0x000a2060,
+	.l2_status_offset		= 0x000a2080,
+	.l2_clear_offset		= 0x000a2088,
+	.l2_mask_set_offset		= 0x000a2090,
+	.l2_mask_clear_offset		= 0x000a2094,
+	.sw_reset_offset		= 0x000a2040,
+	.led_ctrl_offset		= 0x000a204c,
+	.m2m_src_offset			= 0x000a2000,
+	.m2m_dst_offset			= 0x000a2004,
+	.m2m_cmd_offset			= 0x000a2008,
+	.m2m_status_offset		= 0x000a200c,
+	.h2m_resp_bit[1]		= 0x1,
+	.h2m_req_bit[1]			= 0x2,
+	.sideband_gmii_fc_offset	= 0x000a1420
+};
+
+static const struct moca_regs regs_11_lite = {
+	.data_mem_offset		= 0,
+	.data_mem_size			= (96 * 1024),
+	.cntl_mem_offset		= 0x0004c000,
+	.cntl_mem_size			= (80 * 1024),
+	.gp0_offset			= 0x000a2050,
+	.gp1_offset			= 0x000a2054,
+	.ringbell_offset		= 0x000a2060,
+	.l2_status_offset		= 0x000a2080,
+	.l2_clear_offset		= 0x000a2088,
+	.l2_mask_set_offset		= 0x000a2090,
+	.l2_mask_clear_offset		= 0x000a2094,
+	.sw_reset_offset		= 0x000a2040,
+	.led_ctrl_offset		= 0x000a204c,
+	.m2m_src_offset			= 0x000a2000,
+	.m2m_dst_offset			= 0x000a2004,
+	.m2m_cmd_offset			= 0x000a2008,
+	.m2m_status_offset		= 0x000a200c,
+	.h2m_resp_bit[1]		= 0x1,
+	.h2m_req_bit[1]			= 0x2,
+	.sideband_gmii_fc_offset	= 0x000a1420
+};
+
+static const struct moca_regs regs_11 = {
+	.data_mem_offset		= 0,
+	.data_mem_size			= (256 * 1024),
+	.cntl_mem_offset		= 0x0004c000,
+	.cntl_mem_size			= (80 * 1024),
+	.gp0_offset			= 0x000a2050,
+	.gp1_offset			= 0x000a2054,
+	.ringbell_offset		= 0x000a2060,
+	.l2_status_offset		= 0x000a2080,
+	.l2_clear_offset		= 0x000a2088,
+	.l2_mask_set_offset		= 0x000a2090,
+	.l2_mask_clear_offset		= 0x000a2094,
+	.sw_reset_offset		= 0x000a2040,
+	.led_ctrl_offset		= 0x000a204c,
+	.m2m_src_offset			= 0x000a2000,
+	.m2m_dst_offset			= 0x000a2004,
+	.m2m_cmd_offset			= 0x000a2008,
+	.m2m_status_offset		= 0x000a200c,
+	.h2m_resp_bit[1]		= 0x1,
+	.h2m_req_bit[1]			= 0x2,
+	.sideband_gmii_fc_offset	= 0x000a1420
+};
+
+static const struct moca_regs regs_20 = {
+	.data_mem_offset		= 0,
+	.data_mem_size			= (288 * 1024),
+	.cntl_mem_offset		= 0x00120000,
+	.cntl_mem_size			= (384 * 1024),
+	.gp0_offset			= 0,
+	.gp1_offset			= 0,
+	.ringbell_offset		= 0x001ffd0c,
+	.l2_status_offset		= 0x001ffc40,
+	.l2_clear_offset		= 0x001ffc48,
+	.l2_mask_set_offset		= 0x001ffc50,
+	.l2_mask_clear_offset		= 0x001ffc54,
+	.sw_reset_offset		= 0x001ffd00,
+	.led_ctrl_offset		= 0,
+	.m2m_src_offset			= 0x001ffc00,
+	.m2m_dst_offset			= 0x001ffc04,
+	.m2m_cmd_offset			= 0x001ffc08,
+	.m2m_status_offset		= 0x001ffc0c,
+	.moca2host_mmp_inbox_0_offset	= 0x001ffd58,
+	.moca2host_mmp_inbox_1_offset	= 0x001ffd5c,
+	.moca2host_mmp_inbox_2_offset	= 0x001ffd60,
+	.h2m_resp_bit[1]		= 0x10,
+	.h2m_req_bit[1]			= 0x20,
+	.h2m_resp_bit[0]		= 0x1,
+	.h2m_req_bit[0]			= 0x2,
+	.sideband_gmii_fc_offset	= 0x001fec18
+};
+
+#define MOCA_FW_MAGIC		0x4d6f4341
+
+struct moca_fw_hdr {
+	uint32_t		jump[2];
+	uint32_t		length;
+	uint32_t		cpuid;
+	uint32_t		magic;
+	uint32_t		hw_rev;
+	uint32_t		bl_chunks;
+	uint32_t		res1;
+};
+
+struct bsc_regs {
+	u32			chip_address;
+	u32			data_in[8];
+	u32			cnt_reg;
+	u32			ctl_reg;
+	u32			iic_enable;
+	u32			data_out[8];
+	u32			ctlhi_reg;
+	u32			scl_param;
+};
+
+/* support for multiple MoCA devices */
+#define NUM_MINORS		8
+static struct moca_priv_data *minor_tbl[NUM_MINORS];
+static struct class *moca_class;
+
+/* character major device number */
+#define MOCA_MAJOR		234
+#define MOCA_CLASS		"bmoca"
+
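+/* MoCA-to-host (M2H) L2 interrupt status bits */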
+#define M2H_RESP		BIT(0)
+#define M2H_REQ			BIT(1)
+#define M2H_ASSERT		BIT(2)
+#define M2H_NEXTCHUNK		BIT(3)
+#define M2H_NEXTCHUNK_CPU0	BIT(4)
+#define M2H_WDT_CPU0		BIT(6)
+#define M2H_WDT_CPU1		BIT(10)
+#define M2H_DMA			BIT(11)
+
+#define M2H_RESP_CPU0		BIT(13)
+#define M2H_REQ_CPU0		BIT(14)
+#define M2H_ASSERT_CPU0		BIT(15)
+
+/* does this word contain a NIL byte (i.e. end of string)? */
+#define HAS0(x)			((((x) & 0xff) == 0) || \
+				 (((x) & 0xff00) == 0) || \
+				 (((x) & 0xff0000) == 0) || \
+				 (((x) & 0xff000000) == 0))
+
+#define MOCA_SET(x, y)	 MOCA_WR(x, MOCA_RD(x) | (y))
+#define MOCA_UNSET(x, y) MOCA_WR(x, MOCA_RD(x) & ~(y))
+
+static void moca_3450_write_i2c(struct moca_priv_data *priv, u8 addr,
+				u32 data);
+static u32 moca_3450_read_i2c(struct moca_priv_data *priv, u8 addr);
+static int moca_get_mbx_offset(struct moca_priv_data *priv);
+
+#define INRANGE(x, a, b)	(((x) >= (a)) && ((x) < (b)))
+
+static inline int moca_range_ok(struct moca_priv_data *priv,
+	unsigned long offset, unsigned long len)
+{
+	const struct moca_regs *r = priv->regs;
+	unsigned long lastad = offset + len - 1;
+
+	if (lastad < offset)
+		return -EINVAL;
+
+	if (INRANGE(offset, r->cntl_mem_offset,
+		    r->cntl_mem_offset + r->cntl_mem_size) &&
+	    INRANGE(lastad, r->cntl_mem_offset,
+		    r->cntl_mem_offset + r->cntl_mem_size))
+		return 0;
+
+	if (INRANGE(offset, r->data_mem_offset,
+		    r->data_mem_offset + r->data_mem_size) &&
+	    INRANGE(lastad, r->data_mem_offset,
+		    r->data_mem_offset + r->data_mem_size))
+		return 0;
+
+	if (INRANGE(offset, OFF_PKT_REINIT_MEM, PKT_REINIT_MEM_END) &&
+	    INRANGE(lastad, OFF_PKT_REINIT_MEM, PKT_REINIT_MEM_END))
+		return 0;
+
+	return -EINVAL;
+}
+
+static void moca_mmp_init(struct moca_priv_data *priv, int is20)
+{
+	if (is20) {
+		priv->host_req_size    = HOST_REQ_SIZE_20;
+		priv->host_resp_size   = HOST_RESP_SIZE_20;
+		priv->core_req_size    = CORE_REQ_SIZE_20;
+		priv->core_resp_size   = CORE_RESP_SIZE_20;
+		priv->host_req_offset  = HOST_REQ_OFFSET_20;
+		priv->host_resp_offset = HOST_RESP_OFFSET_20;
+		priv->core_req_offset  = CORE_REQ_OFFSET_20;
+		priv->core_resp_offset = CORE_RESP_OFFSET_20;
+		priv->mmp_20 = 1;
+	} else {
+		priv->host_req_size    = HOST_REQ_SIZE_11;
+		priv->host_resp_size   = HOST_RESP_SIZE_11;
+		priv->core_req_size    = CORE_REQ_SIZE_11;
+		priv->core_resp_size   = CORE_RESP_SIZE_11;
+		priv->host_req_offset  = HOST_REQ_OFFSET_11;
+		priv->host_resp_offset = HOST_RESP_OFFSET_11;
+		priv->core_req_offset  = CORE_REQ_OFFSET_11;
+		priv->core_resp_offset = CORE_RESP_OFFSET_11;
+		priv->mmp_20 = 0;
+	}
+}
+
+static int moca_is_20(struct moca_priv_data *priv)
+{
+	return (priv->hw_rev & MOCA_PROTVER_MASK) == MOCA_PROTVER_20;
+}
+
+#ifdef CONFIG_BRCM_MOCA_BUILTIN_FW
+#error Not supported in this version
+#else
+static const char *bmoca_fw_image;
+#endif
+
+/*
+ * LOW-LEVEL DEVICE OPERATIONS
+ */
+
+#define MOCA_RD(x)		__raw_readl((void __iomem *)(x))
+#define MOCA_WR(x, y)		__raw_writel((y), (void __iomem *)(x))
+
+#define I2C_RD(x)		MOCA_RD(x)
+#define I2C_WR(x, y)		MOCA_WR(x, y)
+
+static void moca_hw_reset(struct moca_priv_data *priv)
+{
+	const struct moca_regs *r = priv->regs;
+
+	/* disable and clear all interrupts */
+	MOCA_WR(priv->base + r->l2_mask_set_offset, 0xffffffff);
+	MOCA_RD(priv->base + r->l2_mask_set_offset);
+
+	/* assert resets */
+
+	/* reset CPU first, both CPUs for MoCA 20 HW */
+	if (moca_is_20(priv))
+		MOCA_SET(priv->base + r->sw_reset_offset, 5);
+	else
+		MOCA_SET(priv->base + r->sw_reset_offset, 1);
+
+	MOCA_RD(priv->base + r->sw_reset_offset);
+
+	udelay(20);
+
+	/* reset everything else except clocks */
+	MOCA_SET(priv->base + r->sw_reset_offset, ~(BIT(3) | BIT(7)));
+	MOCA_RD(priv->base + r->sw_reset_offset);
+
+	/* disable clocks */
+	MOCA_SET(priv->base + r->sw_reset_offset, ~BIT(3));
+	MOCA_RD(priv->base + r->sw_reset_offset);
+
+	MOCA_WR(priv->base + r->l2_clear_offset, 0xffffffff);
+	MOCA_RD(priv->base + r->l2_clear_offset);
+}
+
+/* called any time we start/restart/stop MoCA */
+static void moca_hw_init(struct moca_priv_data *priv, int action)
+{
+	const struct moca_regs *r = priv->regs;
+	int clk_status = 0;
+
+	if (action == MOCA_ENABLE && !priv->enabled) {
+		clk_status = clk_prepare_enable(priv->clk);
+		if (clk_status != 0) {
+			dev_err(priv->dev, "moca clk enable failed\n");
+			goto clk_err_chk;
+		}
+
+		clk_status = clk_prepare_enable(priv->phy_clk);
+		if (clk_status != 0) {
+			dev_err(priv->dev, "moca phy clk enable failed\n");
+			goto clk_err_chk;
+		}
+		clk_status = clk_prepare_enable(priv->cpu_clk);
+		if (clk_status != 0)
+			dev_err(priv->dev, "moca cpu clk enable failed\n");
+
+clk_err_chk:
+		priv->enabled = clk_status ? 0 : 1;
+	}
+
+	/* clock not enabled, register accesses will fail with bus error */
+	if (!priv->enabled)
+		return;
+
+	moca_hw_reset(priv);
+	udelay(1);
+
+	if (action == MOCA_ENABLE) {
+		/* deassert moca_sys_reset and clock */
+		MOCA_UNSET(priv->base + r->sw_reset_offset, BIT(1) | BIT(7));
+
+		if (priv->hw_rev >= HWREV_MOCA_20_GEN22) {
+			/* Take PHY0 out of reset and enable clock */
+			MOCA_UNSET(priv->base + r->sw_reset_offset,
+				   BIT(4) | BIT(8));
+
+			if (priv->bonded_mode) {
+				/* Take PHY1 out of reset and enable clock */
+				MOCA_UNSET(priv->base + r->sw_reset_offset,
+					   BIT(5) | BIT(9));
+			}
+		}
+		MOCA_RD(priv->base + r->sw_reset_offset);
+	}
+
+	if (!moca_is_20(priv)) {
+		/* clear junk out of GP0/GP1 */
+		MOCA_WR(priv->base + r->gp0_offset, 0xffffffff);
+		MOCA_WR(priv->base + r->gp1_offset, 0x0);
+		/* set up activity LED for 50% duty cycle */
+		MOCA_WR(priv->base + r->led_ctrl_offset, 0x40004000);
+	}
+
+	/* enable DMA completion interrupts */
+	MOCA_WR(priv->base + r->ringbell_offset, 0);
+	MOCA_WR(priv->base + r->l2_mask_clear_offset, M2H_DMA);
+	MOCA_RD(priv->base + r->l2_mask_clear_offset);
+
+	if (action == MOCA_DISABLE && priv->enabled) {
+		priv->enabled = 0;
+		clk_disable_unprepare(priv->cpu_clk);
+		clk_disable_unprepare(priv->phy_clk);
+		clk_disable_unprepare(priv->clk);
+	}
+}
+
+static void moca_ringbell(struct moca_priv_data *priv, u32 mask)
+{
+	const struct moca_regs *r = priv->regs;
+
+	MOCA_WR(priv->base + r->ringbell_offset, mask);
+	MOCA_RD(priv->base + r->ringbell_offset);
+}
+
+static u32 moca_irq_status(struct moca_priv_data *priv, int flush)
+{
+	const struct moca_regs *r = priv->regs;
+	u32 stat, dma_mask = M2H_DMA | M2H_NEXTCHUNK;
+	unsigned long flags;
+
+	if (moca_is_20(priv))
+		dma_mask |= M2H_NEXTCHUNK_CPU0;
+
+	spin_lock_irqsave(&priv->irq_status_lock, flags);
+
+	stat = MOCA_RD(priv->base + priv->regs->l2_status_offset);
+
+	if (flush == FLUSH_IRQ) {
+		MOCA_WR(priv->base + r->l2_clear_offset, stat);
+		MOCA_RD(priv->base + r->l2_clear_offset);
+	}
+	if (flush == FLUSH_DMA_ONLY) {
+		MOCA_WR(priv->base + r->l2_clear_offset,
+			stat & dma_mask);
+		MOCA_RD(priv->base + r->l2_clear_offset);
+	}
+	if (flush == FLUSH_REQRESP_ONLY) {
+		MOCA_WR(priv->base + r->l2_clear_offset,
+			stat & (M2H_RESP | M2H_REQ |
+			M2H_RESP_CPU0 | M2H_REQ_CPU0));
+		MOCA_RD(priv->base + r->l2_clear_offset);
+	}
+
+	spin_unlock_irqrestore(&priv->irq_status_lock, flags);
+
+	return stat;
+}
+
+static void moca_enable_irq(struct moca_priv_data *priv)
+{
+	const struct moca_regs *r = priv->regs;
+
+	/* unmask everything */
+	u32 mask = M2H_REQ | M2H_RESP | M2H_ASSERT | M2H_WDT_CPU1 |
+		M2H_NEXTCHUNK | M2H_DMA;
+
+	if (moca_is_20(priv))
+		mask |= M2H_WDT_CPU0 | M2H_NEXTCHUNK_CPU0 |
+			M2H_REQ_CPU0 | M2H_RESP_CPU0 | M2H_ASSERT_CPU0;
+
+	MOCA_WR(priv->base + r->l2_mask_clear_offset, mask);
+	MOCA_RD(priv->base + r->l2_mask_clear_offset);
+}
+
+static void moca_disable_irq(struct moca_priv_data *priv)
+{
+	const struct moca_regs *r = priv->regs;
+
+	/* mask everything except DMA completions */
+	u32 mask = M2H_REQ | M2H_RESP | M2H_ASSERT | M2H_WDT_CPU1 |
+		M2H_NEXTCHUNK;
+
+	if (moca_is_20(priv))
+		mask |= M2H_WDT_CPU0 | M2H_NEXTCHUNK_CPU0 |
+			M2H_REQ_CPU0 | M2H_RESP_CPU0 | M2H_ASSERT_CPU0;
+
+	MOCA_WR(priv->base + r->l2_mask_set_offset, mask);
+	MOCA_RD(priv->base + r->l2_mask_set_offset);
+}
+
+static u32 moca_start_mips(struct moca_priv_data *priv, u32 cpu)
+{
+	const struct moca_regs *r = priv->regs;
+
+	if (moca_is_20(priv)) {
+		if (cpu == 1)
+			MOCA_UNSET(priv->base + r->sw_reset_offset, BIT(0));
+		else {
+			moca_mmp_init(priv, 1);
+			MOCA_UNSET(priv->base + r->sw_reset_offset, BIT(2));
+		}
+	} else
+		MOCA_UNSET(priv->base + r->sw_reset_offset, BIT(0));
+	MOCA_RD(priv->base + r->sw_reset_offset);
+	return 0;
+}
+
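+/*
+ * Run one M2M DMA transfer between host memory and the MoCA core's
+ * memory, then wait for the DMA completion interrupt.
+ */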
+static void moca_m2m_xfer(struct moca_priv_data *priv,
+	u32 dst, u32 src, u32 ctl)
+{
+	const struct moca_regs *r = priv->regs;
+	u32 status;
+
+	MOCA_WR(priv->base + r->m2m_src_offset, src);
+	MOCA_WR(priv->base + r->m2m_dst_offset, dst);
+	MOCA_WR(priv->base + r->m2m_status_offset, 0);
+	MOCA_RD(priv->base + r->m2m_status_offset);
+	MOCA_WR(priv->base + r->m2m_cmd_offset, ctl);
+
+	if (wait_for_completion_timeout(&priv->copy_complete,
+		1000 * M2M_TIMEOUT_MS) <= 0) {
+		dev_warn(priv->dev, "DMA interrupt timed out, status %x\n",
+			 moca_irq_status(priv, NO_FLUSH_IRQ));
+	}
+
+	status = MOCA_RD(priv->base + r->m2m_status_offset);
+
+	if (status & (3 << 29))
+		dev_warn(priv->dev, "bad status %08x (s/d/c %08x %08x %08x)\n",
+			 status, src, dst, ctl);
+}
+
+static void moca_write_mem(struct moca_priv_data *priv,
+	u32 dst_offset, void *src, unsigned int len)
+{
+	dma_addr_t pa;
+
+	if (moca_range_ok(priv, dst_offset, len) < 0) {
+		dev_warn(priv->dev, "copy past end of cntl memory: %08x\n",
+			 dst_offset);
+		return;
+	}
+
+	pa = dma_map_single(&priv->pdev->dev, src, len, DMA_TO_DEVICE);
+	moca_m2m_xfer(priv, dst_offset + priv->regs->data_mem_offset, (u32)pa,
+		len | M2M_WRITE);
+	dma_unmap_single(&priv->pdev->dev, pa, len, DMA_TO_DEVICE);
+}
+
+static void moca_read_mem(struct moca_priv_data *priv,
+	void *dst, u32 src_offset, unsigned int len)
+{
+	int i;
+
+	if (moca_range_ok(priv, src_offset, len) < 0) {
+		dev_warn(priv->dev, "copy past end of cntl memory: %08x\n",
+			 src_offset);
+		return;
+	}
+
+	for (i = 0; i < len; i += 4)
+		DEV_WR(dst + i, cpu_to_be32(
+			MOCA_RD(priv->base + src_offset +
+				priv->regs->data_mem_offset + i)));
+}
+
+static void moca_write_sg(struct moca_priv_data *priv,
+	u32 dst_offset, struct scatterlist *sg, int nents)
+{
+	int j;
+	uintptr_t addr = priv->regs->data_mem_offset + dst_offset;
+
+	dma_map_sg(&priv->pdev->dev, sg, nents, DMA_TO_DEVICE);
+
+	for (j = 0; j < nents; j++) {
+		moca_m2m_xfer(priv, addr, (u32)sg[j].dma_address,
+			sg[j].length | M2M_WRITE);
+
+		addr += sg[j].length;
+	}
+
+	dma_unmap_sg(&priv->pdev->dev, sg, nents, DMA_TO_DEVICE);
+}
+
+static inline void moca_read_sg(struct moca_priv_data *priv,
+	u32 src_offset, struct scatterlist *sg, int nents)
+{
+	int j;
+	uintptr_t addr = priv->regs->data_mem_offset + src_offset;
+
+	dma_map_sg(&priv->pdev->dev, sg, nents, DMA_FROM_DEVICE);
+
+	for (j = 0; j < nents; j++) {
+		moca_m2m_xfer(priv, (u32)sg[j].dma_address, addr,
+			sg[j].length | M2M_READ);
+
+		addr += sg[j].length;
+		SetPageDirty(sg_page(&sg[j]));
+	}
+
+	dma_unmap_sg(&priv->pdev->dev, sg, nents, DMA_FROM_DEVICE);
+}
+
+#define moca_3450_write moca_3450_write_i2c
+#define moca_3450_read moca_3450_read_i2c
+
+static void moca_put_pages(struct moca_priv_data *priv, int pages)
+{
+	int i;
+
+	for (i = 0; i < pages; i++)
+		page_cache_release(priv->fw_pages[i]);
+}
+
+static int moca_get_pages(struct moca_priv_data *priv, unsigned long addr,
+	int size, unsigned int moca_addr, int write)
+{
+	unsigned int pages, chunk_size;
+	int ret, i;
+
+	if (addr & 3)
+		return -EINVAL;
+	if ((size <= 0) || (size > MAX_FW_SIZE))
+		return -EINVAL;
+
+	pages = ((addr & ~PAGE_MASK) + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	down_read(&current->mm->mmap_sem);
+	ret = get_user_pages(current, current->mm, addr & PAGE_MASK, pages,
+		write, 0, priv->fw_pages, NULL);
+	up_read(&current->mm->mmap_sem);
+
+	if (ret < 0)
+		return ret;
+	BUG_ON((ret > MAX_FW_PAGES) || (pages == 0));
+
+	if (ret < pages) {
+		dev_warn(priv->dev,
+			 "get_user_pages returned %d expecting %d\n",
+			 ret, pages);
+		moca_put_pages(priv, ret);
+		return -EFAULT;
+	}
+
+	chunk_size = PAGE_SIZE - (addr & ~PAGE_MASK);
+	if (size < chunk_size)
+		chunk_size = size;
+
+	sg_set_page(&priv->fw_sg[0], priv->fw_pages[0], chunk_size,
+		addr & ~PAGE_MASK);
+	size -= chunk_size;
+
+	for (i = 1; i < pages; i++) {
+		sg_set_page(&priv->fw_sg[i], priv->fw_pages[i],
+			size > PAGE_SIZE ? PAGE_SIZE : size, 0);
+		size -= PAGE_SIZE;
+	}
+	return ret;
+}
+
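+/*
+ * Download a firmware image: write the bootloader chunk(s) plus the
+ * first payload chunk, start the MIPS, then stream the remaining
+ * chunks, waiting for a per-chunk ACK from the bootloader.
+ */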
+static int moca_write_img(struct moca_priv_data *priv, struct moca_xfer *x)
+{
+	int pages, i, ret = -EINVAL;
+	struct moca_fw_hdr hdr;
+	u32 bl_chunks;
+
+	if (copy_from_user(&hdr, (void __user *)(unsigned long)x->buf,
+			sizeof(hdr)))
+		return -EFAULT;
+
+	bl_chunks = be32_to_cpu(hdr.bl_chunks);
+	if (!bl_chunks || (bl_chunks > MAX_BL_CHUNKS))
+		bl_chunks = 1;
+
+	pages = moca_get_pages(priv, (unsigned long)x->buf, x->len, 0, 0);
+	if (pages < 0)
+		return pages;
+	if (pages < (bl_chunks + 2))
+		goto out;
+
+	/* host must use FW_CHUNK_SIZE MMU pages (for now) */
+	BUG_ON(FW_CHUNK_SIZE != PAGE_SIZE);
+
+	/* write the first two chunks, then start the MIPS */
+	moca_write_sg(priv, 0, &priv->fw_sg[0], bl_chunks + 1);
+	moca_enable_irq(priv);
+	moca_start_mips(priv, be32_to_cpu(hdr.cpuid));
+	ret = 0;
+
+	/* wait for an ACK, then write each successive chunk */
+	for (i = bl_chunks + 1; i < pages; i++) {
+		if (wait_for_completion_timeout(&priv->chunk_complete,
+				1000 * M2M_TIMEOUT_MS) <= 0) {
+			moca_disable_irq(priv);
+			dev_warn(priv->dev, "chunk ack timed out\n");
+			ret = -EIO;
+			goto out;
+		}
+		moca_write_sg(priv, priv->regs->data_mem_offset +
+			      FW_CHUNK_SIZE * bl_chunks,
+			      &priv->fw_sg[i], 1);
+	}
+
+	/* wait for ACK of last block.  Older firmware images didn't
+	   ACK the last block, so don't return an error */
+	wait_for_completion_timeout(&priv->chunk_complete,
+			1000 * M2M_TIMEOUT_MS / 10);
+
+out:
+	moca_put_pages(priv, pages);
+	return ret;
+}
+
+/*
+ * MESSAGE AND LIST HANDLING
+ */
+
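+/*
+ * Expand lab printf traps: the trap carries only the address and
+ * length of the string in core memory, so fetch the text and rewrite
+ * the message in place before it is queued for userland.
+ */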
+static void moca_handle_lab_printf(struct moca_priv_data *priv,
+	struct moca_core_msg *m)
+{
+	u32 str_len;
+	u32 str_addr;
+
+	if (priv->mmp_20) {
+		str_len = (be32_to_cpu(m->data[4]) + 3) & ~3;
+		str_addr = be32_to_cpu(m->data[3]) & 0x1fffffff;
+
+		if ((be32_to_cpu(m->data[0]) == 0x3) &&
+		    (be32_to_cpu(m->data[1]) == 12) &&
+		    ((be32_to_cpu(m->data[2]) & 0xffffff) == 0x090801) &&
+		    (be32_to_cpu(m->data[4]) <= MAX_LAB_PRINTF)) {
+			m->len = 3 + str_len;
+			moca_read_mem(priv, &m->data[3], str_addr, str_len);
+
+			m->data[1] = cpu_to_be32(m->len - 8);
+		}
+	} else {
+		str_len = (be32_to_cpu(m->data[3]) + 3) & ~3;
+		str_addr = be32_to_cpu(m->data[2]) & 0x1fffffff;
+
+		if ((be32_to_cpu(m->data[0]) & 0xff0000ff) == 0x09000001 &&
+			be32_to_cpu(m->data[1]) == 0x600b0008 &&
+			(be32_to_cpu(m->data[3]) <= MAX_LAB_PRINTF)) {
+
+			m->len = 8 + str_len;
+			moca_read_mem(priv, &m->data[2], str_addr, str_len);
+
+			m->data[1] = cpu_to_be32((MOCA_IE_DRV_PRINTF << 16) +
+				m->len - 8);
+		}
+	}
+}
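+
+/* flush all queued messages and mark the MoCA core as stopped */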
+static void moca_msg_reset(struct moca_priv_data *priv)
+{
+	int i;
+
+	if (priv->running)
+		moca_disable_irq(priv);
+	priv->running = 0;
+	priv->host_mbx_busy = 0;
+	priv->host_resp_pending = 0;
+	priv->core_req_pending = 0;
+	priv->assert_pending = 0;
+	priv->mbx_offset[0] = -1;
+	priv->mbx_offset[1] = -1;
+
+	spin_lock_bh(&priv->list_lock);
+	INIT_LIST_HEAD(&priv->core_msg_free_list);
+	INIT_LIST_HEAD(&priv->core_msg_pend_list);
+
+	for (i = 0; i < NUM_CORE_MSG; i++)
+		list_add_tail(&priv->core_msg_queue[i].chain,
+			&priv->core_msg_free_list);
+
+	INIT_LIST_HEAD(&priv->host_msg_free_list);
+	INIT_LIST_HEAD(&priv->host_msg_pend_list);
+
+	for (i = 0; i < NUM_HOST_MSG; i++)
+		list_add_tail(&priv->host_msg_queue[i].chain,
+			&priv->host_msg_free_list);
+	spin_unlock_bh(&priv->list_lock);
+}
+
+static struct list_head *moca_detach_head(struct moca_priv_data *priv,
+	struct list_head *h)
+{
+	struct list_head *r = NULL;
+
+	spin_lock_bh(&priv->list_lock);
+	if (!list_empty(h)) {
+		r = h->next;
+		list_del(r);
+	}
+	spin_unlock_bh(&priv->list_lock);
+
+	return r;
+}
+
+static void moca_attach_tail(struct moca_priv_data *priv,
+	struct list_head *elem, struct list_head *list)
+{
+	spin_lock_bh(&priv->list_lock);
+	list_add_tail(elem, list);
+	spin_unlock_bh(&priv->list_lock);
+}
+
+/* Must have dev_mutex when calling this function */
+static int moca_recvmsg(struct moca_priv_data *priv, uintptr_t offset,
+	u32 max_size, uintptr_t reply_offset, u32 cpuid)
+{
+	struct list_head *ml = NULL;
+	struct moca_core_msg *m;
+	unsigned int w, rw, num_ies;
+	u32 data, size;
+	char *msg;
+	int err = -ENOMEM;
+	u32 *reply = priv->core_resp_buf;
+	int attach = 1;
+
+	m = &priv->core_msg_temp;
+
+	/* make sure we have the mailbox offset before using it */
+	moca_get_mbx_offset(priv);
+
+	/* read only as much as is necessary.
+	   The second word is the length for mmp_20 */
+	if (priv->mmp_20) {
+		moca_read_mem(priv, m->data,
+			offset + priv->mbx_offset[cpuid], 8);
+
+		size = (be32_to_cpu(m->data[1])+3) & 0xFFFFFFFC;
+		/* if size is too large, this is a protocol error.
+		   mocad will output the error message */
+		if (size > max_size - 8)
+			size = max_size - 8;
+
+		moca_read_mem(priv, &m->data[2],
+			offset + priv->mbx_offset[cpuid] + 8, size);
+	} else
+		moca_read_mem(priv, m->data,
+			offset + priv->mbx_offset[cpuid], max_size);
+
+	data = be32_to_cpu(m->data[0]);
+
+	if (priv->mmp_20) {
+		/* In MoCA 2.0, there is only 1 IE per message */
+		num_ies = 1;
+	} else {
+		num_ies = data & 0xffff;
+	}
+
+	if (reply_offset) {
+		if (priv->mmp_20) {
+			/* In MoCA 2.0, the ACK is to simply set the
+			   MSB in the incoming message and send it
+			   back */
+			reply[0] = cpu_to_be32(data | 0x80000000);
+			rw = 1;
+		} else {
+			/* ACK + seq number + number of IEs */
+			reply[0] = cpu_to_be32((data & 0x00ff0000) |
+				0x04000000 | num_ies);
+			rw = 1;
+		}
+	}
+
+	err = -EINVAL;
+	w = 1;
+	max_size >>= 2;
+	while (num_ies) {
+		if (w >= max_size) {
+			msg = "dropping long message";
+			goto bad;
+		}
+
+		data = be32_to_cpu(m->data[w++]);
+
+		if (reply_offset && !priv->mmp_20) {
+			/*
+			 * ACK each IE in the original message;
+			 * return code is always 0
+			 */
+			if ((rw << 2) >= priv->core_resp_size)
+				dev_warn(priv->dev,
+					 "Core ack buffer overflowed\n");
+			else {
+				reply[rw] = cpu_to_be32((data & ~0xffff) | 4);
+				rw++;
+				reply[rw] = cpu_to_be32(0);
+				rw++;
+			}
+		}
+		if (data & 3) {
+			msg = "IE is not a multiple of 4 bytes";
+			goto bad;
+		}
+
+		w += ((data & 0xffff) >> 2);
+
+		if (w > max_size) {
+			msg = "dropping long message";
+			goto bad;
+		}
+		num_ies--;
+	}
+	m->len = w << 2;
+
+	/* special case for lab_printf traps */
+	moca_handle_lab_printf(priv, m);
+
+	/*
+	 * Check to see if we can add this new message to the current queue.
+	 * The result will be a single message with multiple IEs.
+	 */
+	if (!priv->mmp_20) {
+		spin_lock_bh(&priv->list_lock);
+		if (!list_empty(&priv->core_msg_pend_list)) {
+			ml = priv->core_msg_pend_list.prev;
+			m = list_entry(ml, struct moca_core_msg, chain);
+
+			if (m->len + priv->core_msg_temp.len > max_size)
+				ml = NULL;
+			else {
+				u32 d0 = be32_to_cpu(
+						priv->core_msg_temp.data[0]);
+
+				/* Only concatenate traps from the core */
+				if (((be32_to_cpu(m->data[0]) & 0xff000000) !=
+					0x09000000) ||
+					((d0 & 0xff000000) != 0x09000000))
+					ml = NULL;
+				else {
+					/*
+					 * We can add the message to the
+					 * previous one. Update the num of IEs,
+					 * update the length and copy the data.
+					 */
+					data = be32_to_cpu(m->data[0]);
+					num_ies = data & 0xffff;
+					num_ies += d0 & 0xffff;
+					data &= 0xffff0000;
+					data |= num_ies;
+					m->data[0] = cpu_to_be32(data);
+
+					/*
+					 * Subtract 4 bytes from length
+					 * for message header
+					 */
+					memcpy(&m->data[m->len >> 2],
+						&priv->core_msg_temp.data[1],
+						priv->core_msg_temp.len - 4);
+					m->len += priv->core_msg_temp.len - 4;
+					attach = 0;
+				}
+			}
+		}
+		spin_unlock_bh(&priv->list_lock);
+	}
+
+	if (ml == NULL) {
+		ml = moca_detach_head(priv, &priv->core_msg_free_list);
+		if (ml == NULL) {
+			msg = "no entries left on core_msg_free_list";
+			err = -ENOMEM;
+			goto bad;
+		}
+		m = list_entry(ml, struct moca_core_msg, chain);
+
+		memcpy(m->data, priv->core_msg_temp.data,
+			priv->core_msg_temp.len);
+		m->len = priv->core_msg_temp.len;
+	}
+
+	if (reply_offset) {
+		if ((cpuid == 1) &&
+			(moca_irq_status(priv, NO_FLUSH_IRQ) & M2H_ASSERT)) {
+			/* do not retry - message is gone forever */
+			err = 0;
+			msg = "core_req overwritten by assertion";
+			goto bad;
+		}
+		if ((cpuid == 0) &&
+			(moca_irq_status(priv, NO_FLUSH_IRQ)
+			& M2H_ASSERT_CPU0)) {
+			/* do not retry - message is gone forever */
+			err = 0;
+			msg = "core_req overwritten by assertion";
+			goto bad;
+		}
+		moca_write_mem(priv, reply_offset + priv->mbx_offset[cpuid],
+			reply, rw << 2);
+		moca_ringbell(priv, priv->regs->h2m_resp_bit[cpuid]);
+	}
+
+	if (attach) {
+		moca_attach_tail(priv, ml, &priv->core_msg_pend_list);
+		wake_up(&priv->core_msg_wq);
+	}
+
+	return 0;
+
+bad:
+	dev_warn(priv->dev, "%s\n", msg);
+
+	if (ml)
+		moca_attach_tail(priv, ml, &priv->core_msg_free_list);
+
+	return err;
+}
+
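+/*
+ * Validate a host-to-MoCA message before it is queued; returns the
+ * message length in bytes, or a negative value if it is malformed.
+ */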
+static int moca_h2m_sanity_check(struct moca_priv_data *priv,
+	struct moca_host_msg *m)
+{
+	unsigned int w, num_ies;
+	u32 data;
+
+	if (priv->mmp_20) {
+		/* The length is stored in data[1]
+		   plus 8 extra header bytes */
+		data = be32_to_cpu(m->data[1]) + 8;
+		if (data > priv->host_req_size)
+			return -1;
+		else
+			return (int) data;
+	} else {
+		data = be32_to_cpu(m->data[0]);
+		num_ies = data & 0xffff;
+
+		w = 1;
+		while (num_ies) {
+			if (w >= (m->len >> 2))
+				return -1;
+
+			data = be32_to_cpu(m->data[w++]);
+
+			if (data & 3)
+				return -1;
+			w += (data & 0xffff) >> 2;
+			num_ies--;
+		}
+		return w << 2;
+	}
+}
+
+/* Must have dev_mutex when calling this function */
+static int moca_sendmsg(struct moca_priv_data *priv, u32 cpuid)
+{
+	struct list_head *ml = NULL;
+	struct moca_host_msg *m;
+
+	if (priv->host_mbx_busy == 1)
+		return -1;
+
+	ml = moca_detach_head(priv, &priv->host_msg_pend_list);
+	if (ml == NULL)
+		return -EAGAIN;
+	m = list_entry(ml, struct moca_host_msg, chain);
+
+	moca_write_mem(priv, priv->mbx_offset[cpuid] + priv->host_req_offset,
+		m->data, m->len);
+
+	moca_ringbell(priv, priv->regs->h2m_req_bit[cpuid]);
+	moca_attach_tail(priv, ml, &priv->host_msg_free_list);
+	wake_up(&priv->host_msg_wq);
+
+	return 0;
+}
+
+/* Must have dev_mutex when calling this function */
+static int moca_wdt(struct moca_priv_data *priv, u32 cpu)
+{
+	struct list_head *ml = NULL;
+	struct moca_core_msg *m;
+
+	ml = moca_detach_head(priv, &priv->core_msg_free_list);
+	if (ml == NULL) {
+		dev_warn(priv->dev, "no entries left on core_msg_free_list\n");
+		return -ENOMEM;
+	}
+
+	if (priv->mmp_20) {
+		/*
+		 * generate phony wdt message to pass to the user
+		 * type = 0x03 (trap)
+		 * IE type = 0x11003 (wdt), 4 bytes length
+		 */
+		m = list_entry(ml, struct moca_core_msg, chain);
+		m->data[0] = cpu_to_be32(0x3);
+		m->data[1] = cpu_to_be32(4);
+		m->data[2] = cpu_to_be32(0x11003);
+		m->len = 12;
+	} else {
+		/*
+		 * generate phony wdt message to pass to the user
+		 * type = 0x09 (trap)
+		 * IE type = 0xff01 (wdt), 4 bytes length
+		 */
+		m = list_entry(ml, struct moca_core_msg, chain);
+		m->data[0] = cpu_to_be32(0x09000001);
+		m->data[1] = cpu_to_be32((MOCA_IE_WDT << 16) | 4);
+		m->data[2] = cpu_to_be32(cpu);
+		m->len = 12;
+	}
+
+	moca_attach_tail(priv, ml, &priv->core_msg_pend_list);
+	wake_up(&priv->core_msg_wq);
+
+	return 0;
+}
+
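+/*
+ * Latch the mailbox base address(es) advertised by the core (GP0 for
+ * MoCA 1.1, the MMP inbox registers for MoCA 2.0) and check that they
+ * fall inside control memory.
+ */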
+static int moca_get_mbx_offset(struct moca_priv_data *priv)
+{
+	const struct moca_regs *r = priv->regs;
+	uintptr_t base;
+
+	if (priv->mbx_offset[1] == -1) {
+		if (moca_is_20(priv))
+			base = MOCA_RD(priv->base +
+				r->moca2host_mmp_inbox_0_offset) &
+				0x1fffffff;
+		else
+			base = MOCA_RD(priv->base + r->gp0_offset) &
+				0x1fffffff;
+
+		if ((base == 0) ||
+			(base >= r->cntl_mem_size + r->cntl_mem_offset) ||
+			(base & 0x07)) {
+			dev_warn(priv->dev,
+				 "can't get mailbox base CPU 1 (%X)\n",
+				 (int)base);
+			return -1;
+		}
+		priv->mbx_offset[1] = base;
+	}
+
+	if ((priv->mbx_offset[0] == -1) && moca_is_20(priv) && priv->mmp_20) {
+		base = MOCA_RD(priv->base +
+			r->moca2host_mmp_inbox_2_offset) &
+			0x1fffffff;
+		if ((base == 0) ||
+			(base >= r->cntl_mem_size + r->cntl_mem_offset) ||
+			(base & 0x07)) {
+			dev_warn(priv->dev,
+				 "can't get mailbox base CPU 0 (%X)\n",
+				 (int)base);
+			return -1;
+		}
+
+		priv->mbx_offset[0] = base;
+	}
+
+	return 0;
+}
+
+/*
+ * INTERRUPT / WORKQUEUE BH
+ */
+
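+/*
+ * Bottom half: runs from the shared workqueue after moca_interrupt()
+ * masks the non-DMA interrupts.
+ */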
+static void moca_work_handler(struct work_struct *work)
+{
+	struct moca_priv_data *priv =
+		container_of(work, struct moca_priv_data, work);
+	u32 mask = 0;
+	int ret, stopped = 0;
+
+	if (priv->enabled) {
+		mask = moca_irq_status(priv, FLUSH_IRQ);
+		if (mask & M2H_DMA) {
+			mask &= ~M2H_DMA;
+			complete(&priv->copy_complete);
+		}
+
+		if (mask & M2H_NEXTCHUNK) {
+			mask &= ~M2H_NEXTCHUNK;
+			complete(&priv->chunk_complete);
+		}
+
+		if (moca_is_20(priv) && mask & M2H_NEXTCHUNK_CPU0) {
+			mask &= ~M2H_NEXTCHUNK_CPU0;
+			complete(&priv->chunk_complete);
+		}
+
+		if (mask == 0) {
+			moca_enable_irq(priv);
+			return;
+		}
+
+		if (mask & (M2H_REQ | M2H_RESP |
+			M2H_REQ_CPU0 | M2H_RESP_CPU0)) {
+			if (moca_get_mbx_offset(priv)) {
+				/* mbx interrupt but mbx_offset is bogus?? */
+				moca_enable_irq(priv);
+				return;
+			}
+		}
+	}
+
+	mutex_lock(&priv->dev_mutex);
+
+	if (!priv->running) {
+		stopped = 1;
+	} else {
+		/* fatal events */
+		if (mask & M2H_ASSERT) {
+			ret = moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 1);
+			if (ret == -ENOMEM)
+				priv->assert_pending = 2;
+		}
+		if (mask & M2H_ASSERT_CPU0) {
+			ret = moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 0);
+			if (ret == -ENOMEM)
+				priv->assert_pending = 1;
+		}
+		/* M2H_WDT_CPU1 is mapped to the only CPU for MoCA11 HW */
+		if (mask & M2H_WDT_CPU1) {
+			ret = moca_wdt(priv, 2);
+			if (ret == -ENOMEM)
+				priv->wdt_pending |= BIT(1);
+			stopped = 1;
+		}
+		if (moca_is_20(priv) && mask & M2H_WDT_CPU0) {
+			ret = moca_wdt(priv, 1);
+			if (ret == -ENOMEM)
+				priv->wdt_pending |= BIT(0);
+			stopped = 1;
+		}
+	}
+	if (stopped) {
+		priv->running = 0;
+		priv->core_req_pending = 0;
+		priv->host_resp_pending = 0;
+		priv->host_mbx_busy = 1;
+		mutex_unlock(&priv->dev_mutex);
+		wake_up(&priv->core_msg_wq);
+		return;
+	}
+
+	/* normal events */
+	if (mask & M2H_REQ) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 1);
+		if (ret == -ENOMEM)
+			priv->core_req_pending = 2;
+	}
+	if (mask & M2H_RESP) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 1);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending = 2;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 1);
+		}
+	}
+
+	if (mask & M2H_REQ_CPU0) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 0);
+		if (ret == -ENOMEM)
+			priv->core_req_pending = 1;
+	}
+	if (mask & M2H_RESP_CPU0) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 0);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending = 1;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 0);
+		}
+	}
+	mutex_unlock(&priv->dev_mutex);
+
+	moca_enable_irq(priv);
+}
+
+static irqreturn_t moca_interrupt(int irq, void *arg)
+{
+	struct moca_priv_data *priv = arg;
+
+	if (1) {
+		u32 mask = moca_irq_status(priv, FLUSH_DMA_ONLY);
+
+		/* need to handle DMA completions ASAP */
+		if (mask & M2H_DMA) {
+			complete(&priv->copy_complete);
+			mask &= ~M2H_DMA;
+		}
+		if (mask & M2H_NEXTCHUNK) {
+			complete(&priv->chunk_complete);
+			mask &= ~M2H_NEXTCHUNK;
+		}
+
+		if (!mask)
+			return IRQ_HANDLED;
+	}
+	moca_disable_irq(priv);
+	schedule_work(&priv->work);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * BCM3450 ACCESS VIA I2C
+ */
+
+static int moca_3450_wait(struct moca_priv_data *priv)
+{
+	struct bsc_regs *bsc = priv->i2c_base;
+	long timeout = HZ / 1000;	/* 1ms */
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait);
+	int i = 0;
+
+	do {
+		if (I2C_RD(&bsc->iic_enable) & 2) {
+			I2C_WR(&bsc->iic_enable, 0);
+			return 0;
+		}
+		if (i++ > 50) {
+			I2C_WR(&bsc->iic_enable, 0);
+			dev_warn(priv->dev, "3450 I2C timed out\n");
+			return -1;
+		}
+		sleep_on_timeout(&wait, timeout ? timeout : 1);
+	} while (1);
+}
+
+static void moca_3450_write_i2c(struct moca_priv_data *priv, u8 addr, u32 data)
+{
+	struct bsc_regs *bsc = priv->i2c_base;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	I2C_WR(&bsc->iic_enable, 0);
+	I2C_WR(&bsc->chip_address, pd->bcm3450_i2c_addr << 1);
+	I2C_WR(&bsc->data_in[0], (addr >> 2) | (data << 8));
+	I2C_WR(&bsc->data_in[1], data >> 24);
+	I2C_WR(&bsc->cnt_reg, (5 << 0) | (0 << 6)); /* 5B out, 0B in */
+	I2C_WR(&bsc->ctl_reg, (1 << 4) | (0 << 0)); /* write only, 390kHz */
+	I2C_WR(&bsc->ctlhi_reg, (1 << 6));          /* 32-bit words */
+	I2C_WR(&bsc->iic_enable, 1);
+
+	moca_3450_wait(priv);
+}
+
+static u32 moca_3450_read_i2c(struct moca_priv_data *priv, u8 addr)
+{
+	struct bsc_regs *bsc = priv->i2c_base;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	I2C_WR(&bsc->iic_enable, 0);
+	I2C_WR(&bsc->chip_address, pd->bcm3450_i2c_addr << 1);
+	I2C_WR(&bsc->data_in[0], (addr >> 2));
+	I2C_WR(&bsc->cnt_reg, (1 << 0) | (4 << 6));   /* 1B out then 4B in */
+	I2C_WR(&bsc->ctl_reg, (1 << 4) | (3 << 0));   /* write/read, 390kHz */
+	I2C_WR(&bsc->ctlhi_reg, (1 << 6));	      /* 32-bit words */
+	I2C_WR(&bsc->iic_enable, 1);
+
+	if (moca_3450_wait(priv) == 0)
+		return I2C_RD(&bsc->data_out[0]);
+	else
+		return 0xffffffff;
+}
+
+#define BCM3450_CHIP_ID		0x00
+#define BCM3450_CHIP_REV	0x04
+#define BCM3450_LNACNTL		0x14
+#define BCM3450_PACNTL		0x18
+#define BCM3450_MISC		0x1c
+
+static int moca_3450_get_reg(struct moca_priv_data *priv, unsigned int  *arg)
+{
+	struct moca_xfer x;
+	u32 *dst;
+	u32 val;
+
+	if (!priv->i2c_base)
+		return -ENODEV;
+
+	if (copy_from_user(&x, (void __user *)arg, sizeof(x)))
+		return -EFAULT;
+
+	dst = (u32 *)(unsigned long)x.buf;
+
+	mutex_lock(&priv->moca_i2c_mutex);
+	val = moca_3450_read(priv, x.moca_addr);
+	mutex_unlock(&priv->moca_i2c_mutex);
+
+	if (put_user(val, dst))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int moca_3450_set_reg(struct moca_priv_data *priv, unsigned int  *arg)
+{
+	struct moca_xfer x;
+	u32 val;
+
+	if (!priv->i2c_base)
+		return -ENODEV;
+
+	if (copy_from_user(&x, (void __user *)arg, sizeof(x)))
+		return -EFAULT;
+
+	mutex_lock(&priv->moca_i2c_mutex);
+
+	if (get_user(val, (u32 *)(unsigned long)x.buf))
+		return -EFAULT;
+
+	moca_3450_write(priv, x.moca_addr, val);
+	mutex_unlock(&priv->moca_i2c_mutex);
+
+	return 0;
+}
+
+static void moca_3450_init(struct moca_priv_data *priv, int action)
+{
+	u32 data;
+
+	/* some platforms connect the i2c directly to the MoCA core */
+	if (!priv->i2c_base)
+		return;
+
+	mutex_lock(&priv->moca_i2c_mutex);
+
+	if (action == MOCA_ENABLE) {
+		/* reset the 3450's I2C block */
+		moca_3450_write(priv, BCM3450_MISC,
+			moca_3450_read(priv, BCM3450_MISC) | 1);
+
+		/* verify chip ID */
+		data = moca_3450_read(priv, BCM3450_CHIP_ID);
+		if (data != 0x3450)
+			dev_warn(priv->dev, "invalid 3450 chip ID 0x%08x\n",
+				 data);
+
+		/* reset the 3450's deserializer */
+		data = moca_3450_read(priv, BCM3450_MISC);
+		data &= ~0x8000; /* power on PA/LNA */
+		moca_3450_write(priv, BCM3450_MISC, data | 2);
+		moca_3450_write(priv, BCM3450_MISC, data & ~2);
+
+		/* set new PA gain */
+		data = moca_3450_read(priv, BCM3450_PACNTL);
+
+		moca_3450_write(priv, BCM3450_PACNTL, (data & ~0x02007ffd) |
+			(0x09 << 11) |		/* RDEG */
+			(0x38 << 5) |		/* CURR_CONT */
+			(0x05 << 2));		/* CURR_FOLLOWER */
+
+		/* Set LNACNTRL to default value */
+		moca_3450_write(priv, BCM3450_LNACNTL, 0x4924);
+
+	} else {
+		/* power down the PA/LNA */
+		data = moca_3450_read(priv, BCM3450_MISC);
+		moca_3450_write(priv, BCM3450_MISC, data | 0x8000);
+
+		data = moca_3450_read(priv, BCM3450_PACNTL);
+		moca_3450_write(priv, BCM3450_PACNTL, data |
+			BIT(0) |	/* PA_PWRDWN */
+			BIT(25));	/* PA_SELECT_PWRUP_BSC */
+
+		data = moca_3450_read(priv, BCM3450_LNACNTL);
+		/* LNA_INBIAS=0, LNA_PWRUP_IIC=0: */
+		data &= ~((7<<12) | (1<<28));
+		/* LNA_SELECT_PWRUP_IIC=1: */
+		moca_3450_write(priv, BCM3450_LNACNTL, data | (1<<29));
+
+	}
+	mutex_unlock(&priv->moca_i2c_mutex);
+}
+
+/*
+ * FILE OPERATIONS
+ */
+
+static int moca_file_open(struct inode *inode, struct file *file)
+{
+	unsigned int minor = iminor(inode);
+	struct moca_priv_data *priv;
+
+	if ((minor >= NUM_MINORS) || minor_tbl[minor] == NULL)
+		return -ENODEV;
+
+	file->private_data = priv = minor_tbl[minor];
+
+	mutex_lock(&priv->dev_mutex);
+	priv->refcount++;
+	mutex_unlock(&priv->dev_mutex);
+	return 0;
+}
+
+static int moca_file_release(struct inode *inode, struct file *file)
+{
+	struct moca_priv_data *priv = file->private_data;
+
+	mutex_lock(&priv->dev_mutex);
+	priv->refcount--;
+	if (priv->refcount == 0 && priv->running == 1) {
+		/* last user closed the device */
+		moca_msg_reset(priv);
+		moca_hw_init(priv, MOCA_DISABLE);
+	}
+	mutex_unlock(&priv->dev_mutex);
+	return 0;
+}
+
+static int moca_ioctl_readmem(struct moca_priv_data *priv,
+	unsigned long xfer_uaddr)
+{
+	struct moca_xfer x;
+	uintptr_t i, src;
+	u32 *dst;
+
+	if (copy_from_user(&x, (void __user *)xfer_uaddr, sizeof(x)))
+		return -EFAULT;
+
+	if (moca_range_ok(priv, x.moca_addr, x.len) < 0)
+		return -EINVAL;
+
+	src = (uintptr_t)priv->base + x.moca_addr;
+	dst = (void *)(unsigned long)x.buf;
+
+	for (i = 0; i < x.len; i += 4, src += 4, dst++)
+		if (put_user(cpu_to_be32(MOCA_RD(src)), dst))
+			return -EFAULT;
+
+	return 0;
+}
+
+static int moca_ioctl_writemem(struct moca_priv_data *priv,
+	unsigned long xfer_uaddr)
+{
+	struct moca_xfer x;
+	uintptr_t i, dst;
+	u32 *src;
+
+	if (copy_from_user(&x, (void __user *)xfer_uaddr, sizeof(x)))
+		return -EFAULT;
+
+	if (moca_range_ok(priv, x.moca_addr, x.len) < 0)
+		return -EINVAL;
+
+	dst = (uintptr_t)priv->base + x.moca_addr;
+	src = (void *)(unsigned long)x.buf;
+
+	for (i = 0; i < x.len; i += 4, src++, dst += 4) {
+		u32 val;
+
+		if (get_user(val, src))
+			return -EFAULT;
+
+		MOCA_WR(dst, cpu_to_be32(val));
+	}
+
+	return 0;
+}
+
+/* legacy ioctl - DEPRECATED */
+static int moca_ioctl_get_drv_info_v2(struct moca_priv_data *priv,
+	unsigned long arg)
+{
+	struct moca_kdrv_info_v2 info;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	memset(&info, 0, sizeof(info));
+	info.version = DRV_VERSION;
+	info.build_number = DRV_BUILD_NUMBER;
+	info.builtin_fw = !!bmoca_fw_image;
+
+	info.uptime = (jiffies - priv->start_time) / HZ;
+	info.refcount = priv->refcount;
+	if (moca_is_20(priv))
+		info.gp1 = priv->running ? MOCA_RD(priv->base +
+			priv->regs->moca2host_mmp_inbox_1_offset) : 0;
+	else
+		info.gp1 = priv->running ?
+			MOCA_RD(priv->base + priv->regs->gp1_offset) : 0;
+
+	memcpy(info.enet_name, pd->enet_name, MOCA_IFNAMSIZ);
+
+	info.enet_id = -1;
+	info.macaddr_hi = pd->macaddr_hi;
+	info.macaddr_lo = pd->macaddr_lo;
+	info.hw_rev = pd->chip_id;
+	info.rf_band = pd->rf_band;
+
+	if (copy_to_user((void *)arg, &info, sizeof(info)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int moca_ioctl_get_drv_info(struct moca_priv_data *priv,
+	unsigned long arg)
+{
+	struct moca_kdrv_info info;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	memset(&info, 0, sizeof(info));
+	info.version = DRV_VERSION;
+	info.build_number = DRV_BUILD_NUMBER;
+	info.builtin_fw = !!bmoca_fw_image;
+
+	info.uptime = (jiffies - priv->start_time) / HZ;
+	info.refcount = priv->refcount;
+	if (moca_is_20(priv))
+		info.gp1 = priv->running ? MOCA_RD(priv->base +
+			priv->regs->moca2host_mmp_inbox_1_offset) : 0;
+	else
+		info.gp1 = priv->running ?
+			MOCA_RD(priv->base + priv->regs->gp1_offset) : 0;
+
+	info.macaddr_hi = pd->macaddr_hi;
+	info.macaddr_lo = pd->macaddr_lo;
+	info.chip_id = pd->chip_id;
+	info.hw_rev = pd->hw_rev;
+	info.rf_band = pd->rf_band;
+	info.phy_freq = priv->phy_freq;
+
+	if (priv->enet_pdev && get_device(&priv->enet_pdev->dev)) {
+		struct net_device *enet_dev;
+		rcu_read_lock();
+		enet_dev = platform_get_drvdata(priv->enet_pdev);
+		if (enet_dev) {
+			dev_hold(enet_dev);
+			strlcpy(info.enet_name, enet_dev->name, IFNAMSIZ);
+			dev_put(enet_dev);
+		}
+		rcu_read_unlock();
+		put_device(&priv->enet_pdev->dev);
+		info.enet_id = MOCA_IFNAME_USE_ID;
+	} else {
+		strlcpy(info.enet_name, pd->enet_name, IFNAMSIZ);
+		info.enet_id = pd->enet_id;
+	}
+
+	if (copy_to_user((void *)arg, &info, sizeof(info)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int moca_ioctl_check_for_data(struct moca_priv_data *priv,
+	unsigned long arg)
+{
+	int data_avail = 0;
+	int ret;
+	u32 mask;
+
+	moca_disable_irq(priv);
+
+	moca_get_mbx_offset(priv);
+
+	/* If an IRQ is pending, process it here rather than waiting for it to
+	   ensure the results are ready. Clear the ones we are currently
+	   processing */
+	mask = moca_irq_status(priv, FLUSH_REQRESP_ONLY);
+
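+	/*
+	 * Drain any mailbox traffic that is already latched: the first two
+	 * branches use core CPU index 1 (the only mailbox on MoCA 1.1), the
+	 * *_CPU0 branches use index 0.  A receive that fails with -ENOMEM is
+	 * recorded in the *_pending flags so moca_file_read() can retry it
+	 * once a buffer is freed.
+	 */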
+	if (mask & M2H_REQ) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 1);
+		if (ret == -ENOMEM)
+			priv->core_req_pending = 2;
+	}
+	if (mask & M2H_RESP) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 1);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending = 2;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 1);
+		}
+	}
+
+	if (mask & M2H_REQ_CPU0) {
+		ret = moca_recvmsg(priv, priv->core_req_offset,
+			priv->core_req_size, priv->core_resp_offset, 0);
+		if (ret == -ENOMEM)
+			priv->core_req_pending = 1;
+	}
+	if (mask & M2H_RESP_CPU0) {
+		ret = moca_recvmsg(priv, priv->host_resp_offset,
+			priv->host_resp_size, 0, 0);
+		if (ret == -ENOMEM)
+			priv->host_resp_pending = 1;
+		if (ret == 0) {
+			priv->host_mbx_busy = 0;
+			moca_sendmsg(priv, 0);
+		}
+	}
+
+	moca_enable_irq(priv);
+
+	spin_lock_bh(&priv->list_lock);
+	data_avail = !list_empty(&priv->core_msg_pend_list);
+	spin_unlock_bh(&priv->list_lock);
+
+	if (copy_to_user((void *)arg, &data_avail, sizeof(data_avail)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long moca_file_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	struct moca_priv_data *priv = file->private_data;
+	struct moca_start start;
+	long ret = -ENOTTY;
+
+	mutex_lock(&priv->dev_mutex);
+
+	switch (cmd) {
+	case MOCA_IOCTL_START:
+		/* FIXME: clk_set_rate() fails on some platforms, so the
+		 * result is ignored rather than aborting the start */
+		clk_set_rate(priv->phy_clk, DEFAULT_PHY_CLOCK);
+		ret = 0;
+
+		if (copy_from_user(&start, (void __user *)arg, sizeof(start)))
+			ret = -EFAULT;
+
+		if (ret >= 0) {
+			priv->bonded_mode =
+				(start.boot_flags & MOCA_BOOT_FLAGS_BONDED);
+			if (!priv->enabled) {
+				moca_msg_reset(priv);
+				moca_hw_init(priv, MOCA_ENABLE);
+				moca_3450_init(priv, MOCA_ENABLE);
+				moca_irq_status(priv, FLUSH_IRQ);
+				moca_mmp_init(priv, 0);
+			}
+
+			ret = moca_write_img(priv, &start.x);
+			if (ret >= 0)
+				priv->running = 1;
+		}
+		break;
+	case MOCA_IOCTL_STOP:
+		moca_msg_reset(priv);
+		moca_3450_init(priv, MOCA_DISABLE);
+		moca_hw_init(priv, MOCA_DISABLE);
+		ret = 0;
+		break;
+	case MOCA_IOCTL_READMEM:
+		if (priv->running)
+			ret = moca_ioctl_readmem(priv, arg);
+		break;
+	case MOCA_IOCTL_WRITEMEM:
+		if (priv->running)
+			ret = moca_ioctl_writemem(priv, arg);
+		break;
+	case MOCA_IOCTL_GET_DRV_INFO_V2:
+		ret = moca_ioctl_get_drv_info_v2(priv, arg);
+		break;
+	case MOCA_IOCTL_GET_DRV_INFO:
+		ret = moca_ioctl_get_drv_info(priv, arg);
+		break;
+	case MOCA_IOCTL_CHECK_FOR_DATA:
+		if (priv->running)
+			ret = moca_ioctl_check_for_data(priv, arg);
+		else
+			ret = -EIO;
+		break;
+	case MOCA_IOCTL_WOL:
+		priv->wol_enabled = (int)arg;
+		dev_info(priv->dev, "WOL is %s\n",
+			priv->wol_enabled ? "enabled" : "disabled");
+		ret = 0;
+		break;
+	case MOCA_IOCTL_SET_CPU_RATE:
+		if (!priv->cpu_clk)
+			ret = -EIO;
+		else
+			ret = clk_set_rate(priv->cpu_clk,
+						     (unsigned int)arg);
+		break;
+	case MOCA_IOCTL_SET_PHY_RATE:
+		if (!priv->phy_clk)
+			ret = -EIO;
+		else
+			ret = clk_set_rate(priv->phy_clk,
+						     (unsigned int)arg);
+		break;
+	case MOCA_IOCTL_GET_3450_REG:
+		ret = moca_3450_get_reg(priv, (unsigned int *)arg);
+		break;
+	case MOCA_IOCTL_SET_3450_REG:
+		ret = moca_3450_set_reg(priv, (unsigned int *)arg);
+		break;
+
+	}
+	mutex_unlock(&priv->dev_mutex);
+
+	return ret;
+}
+
+static ssize_t moca_file_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct moca_priv_data *priv = file->private_data;
+	DECLARE_WAITQUEUE(wait, current);
+	struct list_head *ml = NULL;
+	struct moca_core_msg *m = NULL;
+	ssize_t ret;
+	int empty_free_list = 0;
+
+	if (count < priv->core_req_size)
+		return -EINVAL;
+
+	add_wait_queue(&priv->core_msg_wq, &wait);
+	do {
+		__set_current_state(TASK_INTERRUPTIBLE);
+
+		ml = moca_detach_head(priv, &priv->core_msg_pend_list);
+		if (ml != NULL) {
+			m = list_entry(ml, struct moca_core_msg, chain);
+			ret = 0;
+			break;
+		}
+		if (file->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
+			break;
+		}
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		schedule();
+	} while (1);
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&priv->core_msg_wq, &wait);
+
+	if (ret < 0)
+		return ret;
+
+	if (copy_to_user(buf, m->data, m->len))
+		ret = -EFAULT;	/* beware: message will be dropped */
+	else
+		ret = m->len;
+
+	spin_lock_bh(&priv->list_lock);
+	if (list_empty(&priv->core_msg_free_list))
+		empty_free_list = 1;
+	list_add_tail(ml, &priv->core_msg_free_list);
+	spin_unlock_bh(&priv->list_lock);
+
+	if (empty_free_list) {
+		/*
+		 * we just freed up space for another message, so if there was
+		 * a backlog, clear it out
+		 */
+		mutex_lock(&priv->dev_mutex);
+
+		if (moca_get_mbx_offset(priv)) {
+			mutex_unlock(&priv->dev_mutex);
+			return -EIO;
+		}
+
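+		/*
+		 * In each *_pending word, bit 0 marks a backlogged message
+		 * from core CPU 0 and bit 1 one from CPU 1; retry them now
+		 * that a receive buffer has been returned to the free list.
+		 */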
+		if (priv->assert_pending & 2) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 1) != -ENOMEM)
+				priv->assert_pending &= ~2;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg assert failed\n");
+		}
+		if (priv->assert_pending & 1) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, 0, 0) != -ENOMEM)
+				priv->assert_pending &= ~1;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg assert failed\n");
+		}
+		if (priv->wdt_pending)
+			if (moca_wdt(priv, priv->wdt_pending) != -ENOMEM)
+				priv->wdt_pending = 0;
+
+		if (priv->core_req_pending & 1) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, priv->core_resp_offset, 0)
+				!= -ENOMEM)
+				priv->core_req_pending &= ~1;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg core_req failed\n");
+		}
+		if (priv->core_req_pending & 2) {
+			if (moca_recvmsg(priv, priv->core_req_offset,
+				priv->core_req_size, priv->core_resp_offset, 1)
+				!= -ENOMEM)
+				priv->core_req_pending &= ~2;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg core_req failed\n");
+		}
+		if (priv->host_resp_pending & 1) {
+			if (moca_recvmsg(priv, priv->host_resp_offset,
+				priv->host_resp_size, 0, 0) != -ENOMEM)
+				priv->host_resp_pending &= ~1;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg host_resp failed\n");
+		}
+		if (priv->host_resp_pending & 2) {
+			if (moca_recvmsg(priv, priv->host_resp_offset,
+				priv->host_resp_size, 0, 1) != -ENOMEM)
+				priv->host_resp_pending &= ~2;
+			else
+				dev_warn(priv->dev,
+					 "moca_recvmsg host_resp failed\n");
+		}
+		mutex_unlock(&priv->dev_mutex);
+	}
+
+	return ret;
+}
+
+static ssize_t moca_file_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct moca_priv_data *priv = file->private_data;
+	DECLARE_WAITQUEUE(wait, current);
+	struct list_head *ml = NULL;
+	struct moca_host_msg *m = NULL;
+	ssize_t ret;
+	u32 cpuid;
+
+	if (count > priv->host_req_size)
+		return -EINVAL;
+
+	add_wait_queue(&priv->host_msg_wq, &wait);
+	do {
+		__set_current_state(TASK_INTERRUPTIBLE);
+
+		ml = moca_detach_head(priv, &priv->host_msg_free_list);
+		if (ml != NULL) {
+			m = list_entry(ml, struct moca_host_msg, chain);
+			ret = 0;
+			break;
+		}
+		if (file->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
+			break;
+		}
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		schedule();
+	} while (1);
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&priv->host_msg_wq, &wait);
+
+	if (ret < 0)
+		return ret;
+
+	m->len = count;
+
+	if (copy_from_user(m->data, buf, m->len)) {
+		ret = -EFAULT;
+		goto bad;
+	}
+
+	ret = moca_h2m_sanity_check(priv, m);
+	if (ret < 0) {
+		ret = -EINVAL;
+		goto bad;
+	}
+
+	moca_attach_tail(priv, ml, &priv->host_msg_pend_list);
+
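+	/*
+	 * MoCA 1.1 firmware exposes a single host mailbox (CPU 1).  With
+	 * MMP 2.0, bit 4 of the first message word routes the request to
+	 * CPU 0 instead.
+	 */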
+	if (!priv->mmp_20)
+		cpuid = 1;
+	else {
+		if (cpu_to_be32(m->data[0]) & 0x10)
+			cpuid = 0;
+		else
+			cpuid = 1;
+	}
+	mutex_lock(&priv->dev_mutex);
+	if (priv->running) {
+		if (moca_get_mbx_offset(priv))
+			ret = -EIO;
+		else
+			moca_sendmsg(priv, cpuid);
+	} else
+		ret = -EIO;
+	mutex_unlock(&priv->dev_mutex);
+
+	return ret;
+
+bad:
+	moca_attach_tail(priv, ml, &priv->host_msg_free_list);
+
+	return ret;
+}
+
+static unsigned int moca_file_poll(struct file *file, poll_table *wait)
+{
+	struct moca_priv_data *priv = file->private_data;
+	unsigned int ret = 0;
+
+	poll_wait(file, &priv->core_msg_wq, wait);
+	poll_wait(file, &priv->host_msg_wq, wait);
+
+	spin_lock_bh(&priv->list_lock);
+	if (!list_empty(&priv->core_msg_pend_list))
+		ret |= POLLIN | POLLRDNORM;
+	if (!list_empty(&priv->host_msg_free_list))
+		ret |= POLLOUT | POLLWRNORM;
+	spin_unlock_bh(&priv->list_lock);
+
+	return ret;
+}
+
+static const struct file_operations moca_fops = {
+	.owner =		THIS_MODULE,
+	.open =			moca_file_open,
+	.release =		moca_file_release,
+	.unlocked_ioctl =	moca_file_ioctl,
+	.read =			moca_file_read,
+	.write =		moca_file_write,
+	.poll =			moca_file_poll,
+};
+
+/*
+ * PLATFORM DRIVER
+ */
+#ifdef CONFIG_OF
+static int moca_parse_dt_node(struct moca_priv_data *priv)
+{
+	struct platform_device *pdev = priv->pdev;
+	struct moca_platform_data pd;
+	struct device_node *of_node = pdev->dev.of_node, *enet_node;
+	phandle enet_ph;
+	int status = 0, i = 0;
+	const u8 *macaddr;
+	const char *rfb;
+	const char *const of_rfb[MOCA_BAND_MAX + 1] = MOCA_BAND_NAMES;
+
+	memset(&pd, 0, sizeof(pd));
+
+	/* mandatory entries */
+	status = of_property_read_u32(of_node, "hw-rev", &pd.hw_rev);
+	if (status)
+		goto err;
+
+	status = of_property_read_u32(of_node, "enet-id", &enet_ph);
+	if (status)
+		goto err;
+	enet_node = of_find_node_by_phandle(enet_ph);
+	priv->enet_pdev = of_find_device_by_node(enet_node);
+	of_node_put(enet_node);
+	if (!priv->enet_pdev) {
+		dev_err(&pdev->dev,
+			"can't find associated network interface\n");
+		return -EINVAL;
+	}
+
+	macaddr = of_get_mac_address(of_node);
+	if (!macaddr) {
+		dev_err(&pdev->dev, "can't find MAC address\n");
+		return -EINVAL;
+	}
+
+	mac_to_u32(&pd.macaddr_hi, &pd.macaddr_lo, macaddr);
+
+	/* defaults for optional entries.  All other defaults are 0 */
+	pd.use_dma = 1;
+
+	status = of_property_read_string(of_node, "rf-band", &rfb);
+	if (!status) {
+		for (i = 0; i < MOCA_BAND_MAX; i++) {
+			if (strcmp(rfb, of_rfb[i]) == 0) {
+				pd.rf_band = i;
+				dev_info(&pdev->dev, "using %s(%d) band\n",
+					 of_rfb[i], i);
+				break;
+			}
+		}
+	}
+
+	if (status || i == MOCA_BAND_MAX) {
+		dev_warn(&pdev->dev, "Defaulting to rf-band %s\n", of_rfb[0]);
+		pd.rf_band = 0;
+	}
+
+	/* optional entries */
+	of_property_read_u32(of_node, "i2c-base", &pd.bcm3450_i2c_base);
+	of_property_read_u32(of_node, "i2c-addr", &pd.bcm3450_i2c_addr);
+	of_property_read_u32(of_node, "use-dma", &pd.use_dma);
+	of_property_read_u32(of_node, "use-spi", &pd.use_spi);
+	pd.chip_id = (BRCM_CHIP_ID() << 16) | (BRCM_CHIP_REV() + 0xa0);
+
+	status = platform_device_add_data(pdev, &pd, sizeof(pd));
+err:
+	return status;
+
+}
+
+static const struct of_device_id bmoca_instance_match[] = {
+	{ .compatible = "brcm,bmoca-instance" },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, bmoca_instance_match);
+#endif
+
+static int moca_probe(struct platform_device *pdev)
+{
+	struct moca_priv_data *priv;
+	struct resource *mres, *ires;
+	int minor, err;
+	struct moca_platform_data *pd;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "out of memory\n");
+		return -ENOMEM;
+	}
+	dev_set_drvdata(&pdev->dev, priv);
+	priv->pdev = pdev;
+	priv->start_time = jiffies;
+
+#if defined(CONFIG_OF)
+	err = moca_parse_dt_node(priv);
+	if (err)
+		goto bad;
+#endif
+	priv->clk = clk_get(&pdev->dev, "moca");
+	priv->cpu_clk = clk_get(&pdev->dev, "moca-cpu");
+	priv->phy_clk = clk_get(&pdev->dev, "moca-phy");
+
+	pd = pdev->dev.platform_data;
+	priv->hw_rev = pd->hw_rev;
+
+	if (pd->hw_rev == HWREV_MOCA_11_PLUS)
+		priv->regs = &regs_11_plus;
+	else if (pd->hw_rev == HWREV_MOCA_11_LITE)
+		priv->regs = &regs_11_lite;
+	else if (pd->hw_rev == HWREV_MOCA_11)
+		priv->regs = &regs_11;
+	else if ((pd->hw_rev == HWREV_MOCA_20_ALT) ||
+		(pd->hw_rev == HWREV_MOCA_20_GEN21) ||
+		(pd->hw_rev == HWREV_MOCA_20_GEN22) ||
+		(pd->hw_rev == HWREV_MOCA_20_GEN23))
+		priv->regs = &regs_20;
+	else {
+		dev_err(&pdev->dev, "unsupported MoCA HWREV: %x\n",
+			pd->hw_rev);
+		err = -EINVAL;
+		goto bad;
+	}
+
+	init_waitqueue_head(&priv->host_msg_wq);
+	init_waitqueue_head(&priv->core_msg_wq);
+	init_completion(&priv->copy_complete);
+	init_completion(&priv->chunk_complete);
+
+	spin_lock_init(&priv->list_lock);
+	spin_lock_init(&priv->clock_lock);
+	spin_lock_init(&priv->irq_status_lock);
+
+	mutex_init(&priv->dev_mutex);
+	mutex_init(&priv->copy_mutex);
+	mutex_init(&priv->moca_i2c_mutex);
+
+	sg_init_table(priv->fw_sg, MAX_FW_PAGES);
+
+	INIT_WORK(&priv->work, moca_work_handler);
+
+	priv->minor = -1;
+	for (minor = 0; minor < NUM_MINORS; minor++) {
+		if (minor_tbl[minor] == NULL) {
+			priv->minor = minor;
+			break;
+		}
+	}
+
+	if (priv->minor == -1) {
+		dev_err(&pdev->dev, "can't allocate minor device\n");
+		err = -EIO;
+		goto bad;
+	}
+
+	mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!mres || !ires) {
+		dev_err(&pdev->dev, "can't get resources\n");
+		err = -EIO;
+		goto bad;
+	}
+	priv->base = ioremap(mres->start, resource_size(mres));
+	if (!priv->base) {
+		dev_err(&pdev->dev, "can't remap registers\n");
+		err = -ENOMEM;
+		goto bad;
+	}
+	priv->irq = ires->start;
+
+	if (pd->bcm3450_i2c_base)
+		priv->i2c_base = ioremap(pd->bcm3450_i2c_base,
+			sizeof(struct bsc_regs));
+
+	/* leave core in reset until we get an ioctl */
+
+	moca_hw_reset(priv);
+
+	if (request_irq(priv->irq, moca_interrupt, 0, "moca",
+			priv) < 0) {
+		dev_err(&pdev->dev, "can't request interrupt\n");
+		err = -EIO;
+		goto bad2;
+	}
+	moca_hw_init(priv, MOCA_ENABLE);
+	moca_disable_irq(priv);
+	moca_msg_reset(priv);
+	moca_hw_init(priv, MOCA_DISABLE);
+
+	dev_info(&pdev->dev,
+		 "adding minor #%d@0x%08llx, IRQ %d, I2C 0x%08llx/0x%02x\n",
+		 priv->minor, (unsigned long long)mres->start, ires->start,
+		 (unsigned long long)pd->bcm3450_i2c_base,
+		 pd->bcm3450_i2c_addr);
+
+	minor_tbl[priv->minor] = priv;
+	priv->dev = device_create(moca_class, NULL,
+		MKDEV(MOCA_MAJOR, priv->minor), NULL, "bmoca%d", priv->minor);
+	if (IS_ERR(priv->dev)) {
+		dev_warn(&pdev->dev, "can't register class device\n");
+		priv->dev = NULL;
+	}
+
+	return 0;
+
+bad2:
+	if (priv->base)
+		iounmap(priv->base);
+	if (priv->i2c_base)
+		iounmap(priv->i2c_base);
+bad:
+	kfree(priv);
+	return err;
+}
+
+static int moca_remove(struct platform_device *pdev)
+{
+	struct moca_priv_data *priv = dev_get_drvdata(&pdev->dev);
+	struct clk *clk = priv->clk;
+	struct clk *phy_clk = priv->phy_clk;
+	struct clk *cpu_clk = priv->cpu_clk;
+
+	if (priv->dev)
+		device_destroy(moca_class, MKDEV(MOCA_MAJOR, priv->minor));
+	minor_tbl[priv->minor] = NULL;
+
+	free_irq(priv->irq, priv);
+	if (priv->i2c_base)
+		iounmap(priv->i2c_base);
+	if (priv->base)
+		iounmap(priv->base);
+	kfree(priv);
+
+	clk_put(cpu_clk);
+	clk_put(phy_clk);
+	clk_put(clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int moca_suspend(struct device *dev)
+{
+	/*
+	 * Do not do anything on suspend; the MoCA core is not
+	 * necessarily stopped.
+	 */
+
+	return 0;
+}
+
+static int moca_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops moca_pm_ops = {
+	.suspend		= moca_suspend,
+	.resume			= moca_resume,
+};
+
+#endif
+
+static struct platform_driver moca_plat_drv = {
+	.probe =		moca_probe,
+	.remove =		moca_remove,
+	.driver = {
+		.name =		"bmoca",
+		.owner =	THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm =		&moca_pm_ops,
+#endif
+#ifdef CONFIG_OF
+		.of_match_table = of_match_ptr(bmoca_instance_match),
+#endif
+	},
+};
+
+static int moca_init(void)
+{
+	int ret;
+	memset(minor_tbl, 0, sizeof(minor_tbl));
+	ret = register_chrdev(MOCA_MAJOR, MOCA_CLASS, &moca_fops);
+	if (ret < 0) {
+		pr_err("can't register major %d\n", MOCA_MAJOR);
+		goto bad;
+	}
+
+	moca_class = class_create(THIS_MODULE, MOCA_CLASS);
+	if (IS_ERR(moca_class)) {
+		pr_err("can't create device class\n");
+		ret = PTR_ERR(moca_class);
+		goto bad2;
+	}
+
+	ret = platform_driver_register(&moca_plat_drv);
+	if (ret < 0) {
+		pr_err("can't register platform_driver\n");
+		goto bad3;
+	}
+
+	return 0;
+
+bad3:
+	class_destroy(moca_class);
+bad2:
+	unregister_chrdev(MOCA_MAJOR, MOCA_CLASS);
+bad:
+	return ret;
+}
+
+static void moca_exit(void)
+{
+	platform_driver_unregister(&moca_plat_drv);
+	class_destroy(moca_class);
+	unregister_chrdev(MOCA_MAJOR, MOCA_CLASS);
+}
+
+module_init(moca_init);
+module_exit(moca_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("MoCA messaging driver");
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..6538114
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,5 @@
+override EXTRA_CFLAGS += -I$M/../include -DCONFIG_BCM_6802_MoCA=1 -DDSL_MOCA=1
+
+all: modules
+
+obj-m += bmoca.o
diff --git a/bbsi.h b/bbsi.h
new file mode 100644
index 0000000..875c52d
--- /dev/null
+++ b/bbsi.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2013 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __BBSI_H
+#define __BBSI_H
+
+#include <linux/netdevice.h>
+#include <linux/spi/spi.h>
+
+#ifndef KSEG1
+#define KSEG1 0  // just to appease non-MIPS CPUs. Not really used.
+#endif
+
+#define BP_MOCA_MAX_NUM 1
+
+/*
+ * The exact values here don't matter, as they're translated into "real"
+ * values before talking to mocad.  This is just for the device registration
+ * tables.
+ */
+enum {
+	BP_MOCA_TYPE_WAN,
+	BP_MOCA_TYPE_LAN,
+};
+
+enum {
+	BP_MOCA_RF_BAND_D_LOW,
+	BP_MOCA_RF_BAND_D_HIGH,
+	BP_MOCA_RF_BAND_EXT_D,
+	BP_MOCA_RF_BAND_E,
+	BP_MOCA_RF_BAND_F,
+};
+
+typedef struct BpMocaInfo {
+	int type;
+	int rfBand;
+} BP_MOCA_INFO;
+
+static void BpGetMocaInfo(BP_MOCA_INFO *chips, int *nchips) {
+	if (*nchips >= 1) {
+		*nchips = 1;
+		chips[0].type = BP_MOCA_TYPE_LAN;
+		chips[0].rfBand = BP_MOCA_RF_BAND_E;
+	}
+}
+
+static uint32_t _spi_read32(struct spi_device *spi, uint32_t addr);
+
+
+// TODO(apenwarr): don't make this global.
+//   Or fix the driver to just only enable/disable interrupts at the right
+//   times.
+static int irq_disabled = 0;
+
+static void kerSysMocaHostIntrEnable(struct spi_device *spi) {
+	if (irq_disabled == 1) {
+		irq_disabled = 0;
+		enable_irq(spi->irq);
+	}
+}
+
+static void kerSysMocaHostIntrDisable(struct spi_device *spi) {
+	if (irq_disabled == 0) {
+		disable_irq_nosync(spi->irq);
+		irq_disabled = 1;
+	}
+}
+
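+/*
+ * Poll the BBSI status register until the previous RBUS transaction
+ * completes (busy bit 0x10 clears), reporting any error bit (0x01).
+ * Gives up after 10 attempts and returns the last status byte.
+ */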
+static uint8_t __pollstatus(struct spi_device *spi) {
+	uint8_t wclear[] = { 0x80, 0x06 };
+	uint8_t rdata[1] = { 0 };
+	struct spi_transfer t[2] = {
+		{ .tx_buf = wclear, .len = sizeof(wclear) },
+		{ .rx_buf = rdata, .len = sizeof(rdata) },
+	};
+	struct spi_message m;
+	int i;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t[0], &m);
+	spi_message_add_tail(&t[1], &m);
+
+	for (i = 0; i < 10; i++) {
+		if (spi_sync_locked(spi, &m) < 0) {
+			pr_warn("spi _pollstatus: SPI error\n");
+			return 0x01; // error code
+		}
+		if (rdata[0] & 0x01) {
+			pr_warn("spi _pollstatus: rbus error: %02X\n", rdata[0]);
+			return 0x01; // error result; stop polling now
+		}
+		if (!(rdata[0] & 0x10)) return 0;   // transaction finished
+	}
+	// if we get here, the transaction still isn't finished: weird
+	pr_warn("spi _pollstatus: still busy: %02X\n", rdata[0]);
+	return rdata[0];
+}
+
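+/*
+ * Start a read by writing the target RBUS address into the BBSI address
+ * register (optionally in speculative mode), retrying up to 10 times until
+ * the status poll reports completion.  Returns 0 on success.
+ */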
+static uint32_t __spi_read32a(struct spi_device *spi, uint32_t addr,
+				int speculative) {
+	uint8_t waddr[] = {
+		0x81, 0x07,
+		0x01 | (speculative ? 0x02 : 0),
+		0, 0, 0, 0 };
+	struct spi_transfer addrt[1] = {
+		{ .tx_buf = waddr, .len = sizeof(waddr) },
+	};
+	struct spi_message addrm;
+	int j, st;
+
+	spi_message_init(&addrm);
+	spi_message_add_tail(&addrt[0], &addrm);
+
+	__pollstatus(spi);
+	for (j = 0; j < 10; j++) {
+		// write address reg, which triggers the read
+		writel(cpu_to_be32(addr), waddr + sizeof(waddr) - 4);
+		if (spi_sync_locked(spi, &addrm) < 0) {
+			pr_warn("spi_read_addr: error\n");
+		}
+		st = __pollstatus(spi);
+		if (!st) break;
+	}
+	return st;
+}
+
+static uint32_t __spi_read32d_noswap(struct spi_device *spi) {
+	uint8_t wdata[] = { 0x80, 0x0c };
+	uint8_t rdata[4];
+	struct spi_transfer datat[2] = {
+		{ .tx_buf = wdata, .len = sizeof(wdata) },
+		{ .rx_buf = rdata, .len = sizeof(rdata) },
+	};
+	struct spi_message datam;
+
+	spi_message_init(&datam);
+	spi_message_add_tail(&datat[0], &datam);
+	spi_message_add_tail(&datat[1], &datam);
+
+	// retrieve actual data bits
+	if (spi_sync_locked(spi, &datam) < 0) {
+		pr_warn("spi_read_data: error\n");
+	}
+	return readl(rdata);
+}
+
+static uint32_t _spi_read32(struct spi_device *spi, uint32_t addr) {
+	int st;
+	uint32_t retval;
+
+	spi_bus_lock(spi->master);
+
+	st = __spi_read32a(spi, addr, 0);
+	if (st) {
+		retval = 0x00000000; // error
+	} else {
+		retval = be32_to_cpu(__spi_read32d_noswap(spi));
+	}
+	spi_bus_unlock(spi->master);
+	return retval;
+}
+
+static void __spi_write32a(struct spi_device *spi, uint32_t addr) {
+	uint8_t waddr[] = { 0x81, 0x07, 0x00, 0, 0, 0, 0  };
+	struct spi_transfer t[1] = {
+		{ .tx_buf = waddr, .len = sizeof(waddr) },
+	};
+	struct spi_message m;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t[0], &m);
+
+	// write address reg
+	writel(cpu_to_be32(addr), waddr + sizeof(waddr) - 4);
+	if (spi_sync_locked(spi, &m) < 0) {
+		pr_warn("spi_write: error\n");
+	}
+}
+
+static void __spi_write32d_noswap(struct spi_device *spi, uint32_t value) {
+	uint8_t wdata[] = { 0x81, 0x0c, 0, 0, 0, 0 };
+	struct spi_transfer t[1] = {
+		{ .tx_buf = wdata, .len = sizeof(wdata) },
+	};
+	struct spi_message m;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t[0], &m);
+
+	// write data reg
+	writel(value, wdata + sizeof(wdata) - 4);
+	if (spi_sync_locked(spi, &m) < 0) {
+		pr_warn("spi_write: error\n");
+	}
+}
+
+
+static void _spi_write32(struct spi_device *spi, uint32_t addr, uint32_t value) {
+	spi_bus_lock(spi->master);
+	__pollstatus(spi);
+	__spi_write32a(spi, addr);
+	__spi_write32d_noswap(spi, cpu_to_be32(value));
+	__pollstatus(spi);
+	spi_bus_unlock(spi->master);
+}
+
+static uint32_t kerSysBcmSpiSlaveReadReg32(struct spi_device *spi, uint32_t addr) {
+	return _spi_read32(spi, addr);
+}
+
+static void kerSysBcmSpiSlaveWriteReg32(struct spi_device *spi, uint32_t addr, uint32_t value) {
+	_spi_write32(spi, addr, value);
+}
+
+static void kerSysBcmSpiSlaveReadBuf(struct spi_device *spi, uint32_t addr, void *dst, int len, int wordsize) {
+	int i;
+	uint32_t *buf = dst;
+
+	if (wordsize != 4) {
+		pr_err("SPI readbuf: only word size == 4 bytes is supported!\n");
+		return;
+	}
+
+	spi_bus_lock(spi->master);
+	__spi_read32a(spi, addr, 1);
+	for (i = 0; i < len; i += wordsize) {
+		buf[i/4] = __spi_read32d_noswap(spi);
+		__pollstatus(spi);
+	}
+
+	spi_bus_unlock(spi->master);
+}
+
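+/*
+ * Bulk write: queue one (command, data-word) transfer pair per 32-bit word
+ * in a single SPI message, toggling chip select between words so each word
+ * lands in the BBSI data register as its own transaction.
+ */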
+static void kerSysBcmSpiSlaveWriteBuf(struct spi_device *spi, uint32_t addr, const void *src, int len, int wordsize) {
+	int i, nelems = len/4;
+	const uint32_t *buf = src;
+	uint8_t wdata[] = { 0x81, 0x0c };
+	struct spi_transfer *t, *tp;
+	struct spi_message m;
+
+	if (len > 8192) {
+		pr_warn("spi writebuf: buffer size %d is too large\n", len);
+		return;
+	}
+	if (wordsize != 4) {
+		pr_err("SPI writebuf: only word size == 4 bytes is supported!\n");
+		return;
+	}
+
+	t = kcalloc(nelems * 2, sizeof(struct spi_transfer), GFP_KERNEL);
+	if (!t) {
+		pr_warn("spi writebuf: out of memory\n");
+		return;
+	}
+
+	spi_message_init(&m);
+
+	for (i = 0, tp = t; i < nelems; i++) {
+		tp->tx_buf = wdata;
+		tp->len = sizeof(wdata);
+		spi_message_add_tail(tp, &m);
+		tp++;
+
+		tp->tx_buf = &buf[i];
+		tp->len = 4;
+		tp->cs_change = 1;
+		spi_message_add_tail(tp, &m);
+		tp++;
+	}
+
+	spi_bus_lock(spi->master);
+
+	__pollstatus(spi);
+	__spi_write32a(spi, addr);
+	spi_sync_locked(spi, &m);
+	__pollstatus(spi);
+
+	spi_bus_unlock(spi->master);
+	kfree(t);
+}
+
+#endif // __BBSI_H
diff --git a/bmoca-6802.c b/bmoca-6802.c
new file mode 100644
index 0000000..22284ac
--- /dev/null
+++ b/bmoca-6802.c
@@ -0,0 +1,917 @@
+/*
+    <:copyright-BRCM:2013:DUAL/GPL:standard
+    
+       Copyright (c) 2013 Broadcom Corporation
+       All Rights Reserved
+    
+    Unless you and Broadcom execute a separate written software license
+    agreement governing use of this software, this software is licensed
+    to you under the terms of the GNU General Public License version 2
+    (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+    with the following added to such license:
+    
+       As a special exception, the copyright holders of this software give
+       you permission to link this software with independent modules, and
+       to copy and distribute the resulting executable under terms of your
+       choice, provided that you also meet, for each linked independent
+       module, the terms and conditions of the license of that module.
+       An independent module is a module which is not derived from this
+       software.  The special exception does not apply to any modifications
+       of the software.
+    
+    Not withstanding the above, under no circumstances may you combine
+    this software in any way with any other Broadcom software provided
+    under a license other than the GPL, without Broadcom's express prior
+    written consent.
+    
+    :> 
+
+*/
+
+#include "bbsi.h"
+#include <linux/spi/spi.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
+typedef unsigned long uintptr_t;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) */
+
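+/*
+ * Register accessors: each macro checks platform_data->use_spi and either
+ * dereferences the address directly (memory-mapped core) or tunnels the
+ * access through the BBSI SPI slave helpers from bbsi.h.
+ */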
+#define MOCA_RD(x)    ((((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0) ? \
+                       (*((volatile uint32_t *)((unsigned long)(x)))) : \
+                       ((uint32_t)kerSysBcmSpiSlaveReadReg32(((struct moca_platform_data *)priv->pdev->dev.platform_data)->spi, (uint32_t)(x))))
+
+#define MOCA_RD8(x, y) ((((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0) ? \
+                        (*(y) = *((volatile unsigned char *)((unsigned long)(x)))) : \
+                        (kerSysBcmSpiSlaveRead(((struct moca_platform_data *)priv->pdev->dev.platform_data)->spi, (unsigned long)(x), y, 1)))
+
+#define MOCA_WR(x,y)   do { ((((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0) ? \
+                            (*((volatile uint32_t *)((unsigned long)(x)))) = (y) : \
+                            kerSysBcmSpiSlaveWriteReg32(((struct moca_platform_data *)priv->pdev->dev.platform_data)->spi, (uint32_t)(x), (y))); } while(0)
+
+#define MOCA_WR8(x,y)    do { ((((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0) ? \
+                               (*((volatile unsigned char *)((unsigned long)(x)))) = (unsigned char)(y) : \
+                               kerSysBcmSpiSlaveWrite(((struct moca_platform_data *)priv->pdev->dev.platform_data)->spi, (unsigned long)(x), (y), 1)); } while(0)
+
+#define MOCA_WR16(x,y)   do { ((((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0) ? \
+                               (*((volatile unsigned short *)((unsigned long)(x)))) = (unsigned short)(y) : \
+                               kerSysBcmSpiSlaveWrite(((struct moca_platform_data *)priv->pdev->dev.platform_data)->spi, (unsigned long)(x), (y), 2)); } while(0)
+
+#define MOCA_WR_BLOCK(addr, src, len) do { kerSysBcmSpiSlaveWriteBuf(((struct moca_platform_data *)priv->pdev->dev.platform_data)->spi, addr, src, len, 4); } while(0)
+#define MOCA_RD_BLOCK(addr, dst, len) do { kerSysBcmSpiSlaveReadBuf(((struct moca_platform_data *)priv->pdev->dev.platform_data)->spi, addr, dst, len, 4); } while(0)
+
+
+#define I2C_RD(x)		MOCA_RD(x)
+#define I2C_WR(x, y)		MOCA_WR(x, y)
+
+#define MOCA_BPCM_NUM         5
+#define MOCA_BPCM_ZONES_NUM   8
+
+typedef enum _PMB_COMMAND_E_
+{
+   PMB_COMMAND_ALL_OFF = 0,
+   PMB_COMMAND_ALL_ON,
+
+   PMB_COMMAND_LAST
+} PMB_COMMAND_E;
+
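+/* Per-BPCM zone select masks used by moca_pmb_control(): each set bit
+ * picks a zone that receives the off/on command. */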
+static uint32_t zone_off_bitmask[MOCA_BPCM_NUM] = { 0xFF, 0x03, 0x30, 0x00, 0x00 };
+static uint32_t zone_on_bitmask[MOCA_BPCM_NUM]  = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+static void bogus_release(struct device *dev)
+{
+}
+
+static struct moca_platform_data moca_lan_data = {
+	.macaddr_hi =		0x00000102,
+	.macaddr_lo =		0x03040000,
+
+	.bcm3450_i2c_base =  0x10406200,
+	.bcm3450_i2c_addr =  0x70,
+	.hw_rev  =     HWREV_MOCA_20_GEN22,
+	.rf_band =     MOCA_BAND_EXT_D,
+	.chip_id =     0,
+	.use_dma           = 0,
+	.use_spi           = 1,
+	.devId            = MOCA_DEVICE_ID_UNREGISTERED, // Filled in dynamically
+#ifdef CONFIG_SMP
+	.smp_processor_id = 1,
+#endif
+};
+
+static struct resource moca_lan_resources[] = {
+	[0] = {
+		.start = 0x10600000,
+		.end =   0x107ffd97,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = { /* Not used for 6802, define for bmoca */
+		.start = 0,
+		.end = 0,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device moca_lan_plat_dev = {
+	.name = "bmoca",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(moca_lan_resources),
+	.resource = moca_lan_resources,
+	.dev = {
+		.platform_data = &moca_lan_data,
+		.release = bogus_release,
+	},
+};
+
+static struct moca_platform_data moca_wan_data = {
+	.macaddr_hi       = 0x00000102,
+	.macaddr_lo       = 0x03040000,
+
+	.bcm3450_i2c_base =  0x10406200,
+	.bcm3450_i2c_addr =  0x70,
+	.hw_rev  = HWREV_MOCA_20_GEN22,
+	.chip_id = 0,
+	
+	.rf_band = MOCA_BAND_EXT_D,
+
+	.use_dma           = 0,
+	.use_spi           = 1,
+	.devId            = MOCA_DEVICE_ID_UNREGISTERED, // Filled in dynamically
+
+#ifdef CONFIG_SMP
+	.smp_processor_id = 1,
+#endif
+};
+
+static struct resource moca_wan_resources[] = {
+	[0] = {
+		.start = 0x10600000,
+		.end =   0x107ffd97,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = { /* Not used for 6802, define for bmoca */
+		.start = 0,
+		.end = 0,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device moca_wan_plat_dev = {
+	.name          = "bmoca",
+	.id            = 1,
+	.num_resources = ARRAY_SIZE(moca_wan_resources),
+	.resource      = moca_wan_resources,
+	.dev           = {
+		.platform_data = &moca_wan_data,
+		.release       = bogus_release,
+	},
+};
+
+static void moca_enable_irq(struct moca_priv_data *priv)
+{
+	kerSysMocaHostIntrEnable(((struct moca_platform_data *)priv->pdev->dev.platform_data)->spi);
+}
+
+static void moca_disable_irq(struct moca_priv_data *priv)
+{
+	kerSysMocaHostIntrDisable(((struct moca_platform_data *)priv->pdev->dev.platform_data)->spi);
+}
+
+static void moca_pmb_busy_wait(struct moca_priv_data *priv)
+{
+	#if 0
+	uint32_t data;
+
+	/* Possible time saver: The register access time over SPI may 
+	   always be enough to guarantee that the write will complete 
+	   in time without having to check the status. */
+	do
+	{
+		data = MOCA_RD(priv->base + priv->regs->pmb_master_status);
+	} while (data & 0x1);
+	#endif
+}
+
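+/*
+ * Walk the per-BPCM zone bitmasks and issue the requested power on/off
+ * command to every selected zone through the PMB master interface.
+ */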
+static void moca_pmb_control(struct moca_priv_data *priv, PMB_COMMAND_E cmd)
+{
+	int i, j;
+	uint32_t * p_zone_control;
+	uint32_t data;
+
+	switch (cmd)
+	{
+		case PMB_COMMAND_ALL_OFF:
+			// Turn off zone command
+			MOCA_WR(priv->base + priv->regs->pmb_master_wdata_offset, 0xA00);
+			p_zone_control = &zone_off_bitmask[0];
+			break;
+		case PMB_COMMAND_ALL_ON:
+			// Turn on zone command
+			MOCA_WR(priv->base + priv->regs->pmb_master_wdata_offset, 0xC00);
+			p_zone_control = &zone_on_bitmask[0];
+			break;
+		default:
+			printk(KERN_WARNING "%s: illegal cmd: %08x\n",
+				__FUNCTION__, cmd);
+			return;
+	}
+
+	for (i = 0; i < MOCA_BPCM_NUM; i++)
+	{
+		for (j = 0; j < MOCA_BPCM_ZONES_NUM; j++)
+		{
+			if (*p_zone_control & (1 << j))
+			{
+				// zone address in bpcms
+				data = (0x1 << 20) + 16 + (i * 4096) + (j * 4);
+				MOCA_WR(priv->base + priv->regs->pmb_master_cmd_offset, data);
+				moca_pmb_busy_wait(priv);
+			}
+		}
+		p_zone_control++;
+	}
+
+}
+
+static void moca_pmb_give_fw_cntrl(struct moca_priv_data *priv)
+{
+	/* Pass control over the memories to the FW */
+	MOCA_WR(priv->base + priv->regs->pmb_master_wdata_offset, 0x1);
+	MOCA_WR(priv->base + priv->regs->pmb_master_cmd_offset, 0x100002);
+	moca_pmb_busy_wait(priv);
+}
+
+static void moca_hw_reset(struct moca_priv_data *priv)
+{
+//	unsigned long flags;
+//   uint32_t chipid;
+
+	/* disable and clear all interrupts */
+	MOCA_WR(priv->base + priv->regs->l2_mask_set_offset, 0xffffffff);
+	MOCA_RD(priv->base + priv->regs->l2_mask_set_offset);
+
+	/* assert resets */
+
+	/* reset CPU first, both CPUs for MoCA 20 HW */
+	if (priv->hw_rev == HWREV_MOCA_20_GEN22)
+		MOCA_SET(priv->base + priv->regs->sw_reset_offset, 5);
+	else
+		MOCA_SET(priv->base + priv->regs->sw_reset_offset, 1);
+
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+	udelay(20);
+
+	/* reset everything else except clocks */
+	MOCA_SET(priv->base + priv->regs->sw_reset_offset, ~((1 << 3) | (1 << 7)));
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+	udelay(20);
+
+	/* disable clocks */
+	MOCA_SET(priv->base + priv->regs->sw_reset_offset, ~(1 << 3));
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+	MOCA_WR(priv->base + priv->regs->l2_clear_offset, 0xffffffff);
+	MOCA_RD(priv->base + priv->regs->l2_clear_offset);
+
+	/* Power down all zones */
+	moca_pmb_control(priv, PMB_COMMAND_ALL_OFF);
+
+	/* Power down all SYS_CTRL memories */
+	MOCA_WR(0x10100068, 1);   // CLKGEN_PLL_SYS1_PLL_PWRDN
+	MOCA_SET(0x1010000c, 1);  // CLKGEN_PLL_SYS0_PLL_CHANNEL_CTRL_CH_3
+
+}
+
+static unsigned int moca_get_phy_freq(struct moca_priv_data *priv)
+{
+	unsigned int x = MOCA_RD(0x10100044); // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_2
+
+	x = (x >> 1) & 0xFF; // Get the MDIV_CH2 field
+
+	return( x ? 2400 / x : 0);
+}
+
+/* called any time we start/restart/stop MoCA */
+static void moca_hw_init(struct moca_priv_data *priv, int action)
+{
+	u32 mask;
+	u32 temp;
+	u32 data;
+	u32 count = 0;
+	struct moca_platform_data * pMocaData = (struct moca_platform_data *)priv->pdev->dev.platform_data;
+
+	if (action == MOCA_ENABLE && !priv->enabled) {
+		clk_enable(priv->clk);
+
+		MOCA_WR(0x10404318, 0xfffffffd); // SUN_TOP_CTRL_SW_INIT_0_SET
+		udelay(20);
+		MOCA_WR(0x1040431c, 0xffffffff); // SUN_TOP_CTRL_SW_INIT_0_CLEAR --> Do this at start of sequence
+		udelay(20);
+   
+		priv->enabled = 1;
+	}
+
+	/* clock not enabled, register accesses will fail with bus error */
+	if (!priv->enabled)
+		return;
+
+	moca_hw_reset(priv);
+	udelay(1);
+
+	MOCA_WR(0x10800000, 0x03);       // EMUX_CNTRL
+	MOCA_WR(0x1080000c, 0x11);       // RGMII_0_CNTRL
+	MOCA_WR(0x10800014, 0xc0);       // RGMII_0_RX_CLK_DELAY_CNTRL
+	MOCA_WR(0x10800808, 0x010000db); // UMAC_CMD
+
+	MOCA_WR(0x104040a4, 0x01);       // GENERAL_CTRL_NO_SCAN_0
+	MOCA_WR(0x10404100, 0x11110011); // PIN_MUX_CTRL_0
+	MOCA_WR(0x10404104, 0x11111111); // PIN_MUX_CTRL_1
+
+	if (action == MOCA_ENABLE) {
+
+		/* Power up all zones */
+		moca_pmb_control(priv, PMB_COMMAND_ALL_ON);
+		moca_pmb_give_fw_cntrl(priv);
+
+		MOCA_UNSET(0x1010000c, 1);  // CLKGEN_PLL_SYS0_PLL_CHANNEL_CTRL_CH_3 
+
+		MOCA_WR(0x1010006C, 1);  // CLKGEN_PLL_SYS1_PLL_RESET 
+		MOCA_WR(0x10100068, 0);  // CLKGEN_PLL_SYS1_PLL_PWRDN 
+		data = 0;
+		while ((data & 0x1) == 0)
+		{
+			/* This typically is only read once */
+			data = MOCA_RD(0x10100060); // CLKGEN_PLL_SYS1_PLL_LOCK_STATUS
+
+			if (count++ > 10)
+				break;
+		}
+		MOCA_WR(0x1010006C, 0);  // CLKGEN_PLL_SYS1_PLL_RESET 
+
+		if (priv->bonded_mode) {
+			MOCA_UNSET(0x10100048, 1);  // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_3 
+			MOCA_UNSET(0x10100050, 1);  // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_5 
+		} else {
+			MOCA_SET(0x10100048, 1);  // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_3 
+			MOCA_SET(0x10100050, 1);  // CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_5 
+		}
+		udelay(1);
+
+		/* deassert moca_sys_reset, system clock, phy0 and phy0 clock */
+		mask = (1 << 1) | (1 << 7) | (1 << 4) | (1 << 8);
+
+		/* deassert phy1 and phy1 clock in bonded mode */
+		if (priv->bonded_mode)
+			mask |= (1 << 5) | (1 << 9);
+
+		MOCA_UNSET(priv->base + priv->regs->sw_reset_offset, mask);
+		MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+
+		/* Check for 6802/6803 A0 chip only with Xtal mod */
+		if ((pMocaData->chip_id & 0xFFFEFFFF) == 0x680200A0)
+		{
+			data = MOCA_RD(0x1040401c);
+			if ((data & 0x7) == 0x2) {
+				/* 25MHz */
+				printk("MoCA running with 25MHz XTAL\n");
+				MOCA_WR(priv->base + priv->regs->host2moca_mmp_outbox_0_offset, 1);
+			} else {
+				printk("MoCA == 50MHz XTAL\n");
+				/* 50MHz clock change only */
+				MOCA_WR(priv->base + priv->regs->host2moca_mmp_outbox_0_offset, 0);
+				//Note: The re-configuration is in NDIV_INT, not PDIV.
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS1_PLL_DIV (32'h10100058) [09:00] = 10'd48
+				temp = MOCA_RD(0x10100058);
+				temp = (temp & 0xFFFFFC00) + 48;
+				MOCA_WR(0x10100058, temp);
+
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS0_PLL_DIV (32'h10100018) [09:00] = 10'd40
+				temp = MOCA_RD(0x10100018);
+				temp = (temp & 0xFFFFFC00) + 40;
+				MOCA_WR(0x10100018, temp);
+
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_4 (32'h1010004C) [08:01] = 8'd48
+				temp = MOCA_RD(0x1010004c);
+				temp = (temp & 0xFFFFFE01) + (48 << 1);
+				MOCA_WR(0x1010004c, temp);
+
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS1_PLL_CHANNEL_CTRL_CH_5 (32'h10100050) [08:01] = 8'd48
+				temp = MOCA_RD(0x10100050);
+				temp = (temp & 0xFFFFFE01) + (48 << 1);
+				MOCA_WR(0x10100050, temp);
+
+				// Then Restart the PLL.
+
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS0_PLL_RESET (32'h1010002C) [0] = 1'b1
+				MOCA_SET(0x1010002c, 1);
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS1_PLL_RESET (32'h1010006C) [0] = 1'b1
+				MOCA_SET(0x1010006c, 1);
+
+				udelay(1);
+
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS0_PLL_RESET (32'h1010002C) [0] = 1'b0
+				MOCA_UNSET(0x1010002c, 1);
+				//`CLKGEN_REG_START + `CLKGEN_PLL_SYS1_PLL_RESET (32'h1010006C) [0] = 1'b0
+				MOCA_UNSET(0x1010006c, 1);
+			}
+		}
+
+		// CLKGEN_PLL_SYS1_PLL_SSC_MODE_CONTROL_HIGH
+		data = MOCA_RD(0x10100070);
+		data = (data & 0xFFFF0000) | 0x7dd;
+		MOCA_WR(0x10100070, data);
+
+		// CLKGEN_PLL_SYS1_PLL_SSC_MODE_CONTROL_LOW
+		data = MOCA_RD(0x10100074);
+		data = (data & 0xffc00000) | 0x3d71;
+		MOCA_WR(0x10100074, data);
+
+		// CLKGEN_PLL_SYS1_PLL_SSC_MODE_CONTROL_LOW
+		MOCA_SET(0x10100074, (1 << 22));
+		
+		printk("Set PLL SSC mode\n");
+	}
+
+
+	if (priv->hw_rev <= HWREV_MOCA_20_GEN21) {
+	/* clear junk out of GP0/GP1 */
+		MOCA_WR(priv->base + priv->regs->gp0_offset, 0xffffffff);
+		MOCA_WR(priv->base + priv->regs->gp1_offset, 0x0);
+		/* set up activity LED for 50% duty cycle */
+		MOCA_WR(priv->base + priv->regs->led_ctrl_offset,
+			0x40004000);
+	}
+
+	/* enable DMA completion interrupts */
+	mask = M2H_REQ | M2H_RESP | M2H_ASSERT | M2H_WDT_CPU1 |
+		M2H_NEXTCHUNK | M2H_DMA;
+
+	if (priv->hw_rev >= HWREV_MOCA_20_GEN21)
+		mask |= M2H_WDT_CPU0 | M2H_NEXTCHUNK_CPU0 |
+			M2H_REQ_CPU0 | M2H_RESP_CPU0 | M2H_ASSERT_CPU0;
+
+	MOCA_WR(priv->base + priv->regs->ringbell_offset, 0);
+	MOCA_WR(priv->base + priv->regs->l2_mask_clear_offset, mask);
+	MOCA_RD(priv->base + priv->regs->l2_mask_clear_offset);
+
+
+	/* Set pinmuxing for MoCA interrupt and flow control */
+	MOCA_UNSET(0x10404110, 0xF00000FF);
+	MOCA_SET(0x10404110, 0x10000022);
+ 
+	MOCA_WR(0x100b0318, 2);
+
+	if (action == MOCA_DISABLE && priv->enabled) {
+		priv->enabled = 0;
+		clk_disable(priv->clk);
+	}
+}
+
+static void moca_ringbell(struct moca_priv_data *priv, uint32_t mask)
+{
+	MOCA_WR(priv->base + priv->regs->ringbell_offset, mask);
+}
+
+static uint32_t moca_start_mips(struct moca_priv_data *priv, unsigned int cpu)
+{
+	if (priv->hw_rev == HWREV_MOCA_20_GEN22) {
+		if (cpu == 1)
+			MOCA_UNSET(priv->base + priv->regs->sw_reset_offset,
+				(1 << 0));
+		else {
+			moca_mmp_init(priv, 1);
+			MOCA_UNSET(priv->base + priv->regs->sw_reset_offset,
+				(1 << 2));
+		}
+	} else
+		MOCA_UNSET(priv->base + priv->regs->sw_reset_offset, (1 << 0));
+	MOCA_RD(priv->base + priv->regs->sw_reset_offset);
+
+	return(0);
+}
+
+static void moca_m2m_xfer(struct moca_priv_data *priv,
+	uint32_t dst, uint32_t src, uint32_t ctl)
+{
+	uint32_t status;
+
+	MOCA_WR(priv->base + priv->regs->m2m_src_offset, src);
+	MOCA_WR(priv->base + priv->regs->m2m_dst_offset, dst);
+	MOCA_WR(priv->base + priv->regs->m2m_status_offset, 0);
+	MOCA_RD(priv->base + priv->regs->m2m_status_offset);
+	MOCA_WR(priv->base + priv->regs->m2m_cmd_offset, ctl);
+
+	do {
+		status = MOCA_RD(priv->base + priv->regs->m2m_status_offset);
+	} while(status == 0);
+
+}
+
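+/*
+ * Copy a buffer into MoCA control memory after checking that the copy does
+ * not run past the end of the control window.  Three paths: the M2M DMA
+ * engine (use_dma), a BBSI SPI block write (use_spi), or direct
+ * word-by-word register writes.
+ */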
+static void moca_write_mem(struct moca_priv_data *priv,
+	uint32_t dst_offset, void *src, unsigned int len)
+{
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	if((dst_offset >= priv->regs->cntl_mem_offset+priv->regs->cntl_mem_size) ||
+		((dst_offset + len) > priv->regs->cntl_mem_offset+priv->regs->cntl_mem_size)) {
+		printk(KERN_WARNING "%s: copy past end of cntl memory: %08x\n",
+			__FUNCTION__, dst_offset);
+		return;
+	}
+
+	if ( 1 == pd->use_dma )
+	{
+		dma_addr_t pa;
+
+		pa = dma_map_single(&priv->pdev->dev, src, len, DMA_TO_DEVICE);
+		mutex_lock(&priv->copy_mutex);
+		moca_m2m_xfer(priv, dst_offset + priv->regs->data_mem_offset, (uint32_t)pa, len | M2M_WRITE);
+		mutex_unlock(&priv->copy_mutex);
+		dma_unmap_single(&priv->pdev->dev, pa, len, DMA_TO_DEVICE);
+	}
+	else
+	{
+		uintptr_t addr = (uintptr_t)priv->base + priv->regs->data_mem_offset + dst_offset;
+		uint32_t *data = src;
+		int i;
+
+		mutex_lock(&priv->copy_mutex);
+		if (((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 1)
+		{
+			src = data;
+			MOCA_WR_BLOCK(addr, src, len);
+		}
+		else
+		{
+			for(i = 0; i < len; i += 4, addr += 4, data++)
+				MOCA_WR(addr, *data);
+			MOCA_RD(addr - 4);	/* flush write */
+		}
+
+		mutex_unlock(&priv->copy_mutex);
+	}
+}
+
+static void moca_read_mem(struct moca_priv_data *priv,
+	void *dst, uint32_t src_offset, unsigned int len)
+{
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+    
+	if((src_offset >= priv->regs->cntl_mem_offset+priv->regs->cntl_mem_size) ||
+		((src_offset + len) > priv->regs->cntl_mem_offset+priv->regs->cntl_mem_size)) {
+		printk(KERN_WARNING "%s: copy past end of cntl memory: %08x\n",
+			__FUNCTION__, src_offset);
+		return;
+	}
+
+	if ( 1 == pd->use_dma )
+	{
+		dma_addr_t pa;
+
+		pa = dma_map_single(&priv->pdev->dev, dst, len, DMA_FROM_DEVICE);
+		mutex_lock(&priv->copy_mutex);
+		moca_m2m_xfer(priv, (uint32_t)pa, src_offset + priv->regs->data_mem_offset, len | M2M_READ);
+		mutex_unlock(&priv->copy_mutex);
+		dma_unmap_single(&priv->pdev->dev, pa, len, DMA_FROM_DEVICE);
+	}
+	else
+	{
+		uintptr_t addr = priv->regs->data_mem_offset + src_offset;
+		uint32_t *data = dst;
+		int i;
+
+		mutex_lock(&priv->copy_mutex);
+		if (((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 1)
+		{
+			MOCA_RD_BLOCK((uintptr_t)priv->base + addr, dst, len);
+		}
+		else
+		{
+			for(i = 0; i < len; i += 4, addr += 4, data++)
+				*data = MOCA_RD((uintptr_t)priv->base + addr);
+		}
+		mutex_unlock(&priv->copy_mutex);
+	}
+}
+
+static void moca_write_sg(struct moca_priv_data *priv,
+	uint32_t dst_offset, struct scatterlist *sg, int nents)
+{
+	int j;
+	uintptr_t addr = priv->regs->data_mem_offset + dst_offset;
+	struct moca_platform_data *pd = priv->pdev->dev.platform_data;
+
+	dma_map_sg(&priv->pdev->dev, sg, nents, DMA_TO_DEVICE);
+
+	mutex_lock(&priv->copy_mutex);
+	for(j = 0; j < nents; j++)
+	{
+		if ( 1 == pd->use_dma )
+		{
+		    // printk("XXX copying page %d, PA %08x\n", j, (int)sg[j].dma_address);
+			moca_m2m_xfer(priv, addr, (uint32_t)sg[j].dma_address, 
+				sg[j].length | M2M_WRITE);
+
+			addr += sg[j].length;
+		}
+		else
+		{
+			unsigned long *data = (void *)phys_to_virt(sg[j].dma_address);
+         //printk("%s: Writing 0x%lx to addr 0x%08lx (len = %d)\n", __FUNCTION__, *data, ((unsigned long)priv->base) + addr, sg[j].length);
+			MOCA_WR_BLOCK(((unsigned long)priv->base) + addr, data, sg[j].length);
+			addr += sg[j].length;
+		}
+	}
+	mutex_unlock(&priv->copy_mutex);
+
+	dma_unmap_sg(&priv->pdev->dev, sg, nents, DMA_TO_DEVICE);
+}
+
+/* NOTE: this function is not tested */
+#if 0
+static void moca_read_sg(struct moca_priv_data *priv,
+	uint32_t src_offset, struct scatterlist *sg, int nents)
+{
+	int j;
+	uintptr_t addr = priv->data_mem_offset + src_offset;
+
+	dma_map_sg(&priv->pdev->dev, sg, nents, DMA_FROM_DEVICE);
+
+	mutex_lock(&priv->copy_mutex);
+	for(j = 0; j < nents; j++) {
+#if 0 //USE_DMA
+		 printk("XXX copying page %d, PA %08x\n", j, (int)sg[j].dma_address);
+		moca_m2m_xfer(priv, addr, (uint32_t)sg[j].dma_address,
+			sg[j].length | M2M_READ);
+
+		addr += sg[j].length;
+#else
+		uint32_t *data = (void *)phys_to_virt(sg[j].dma_address);
+		unsigned int len = sg[j].length;
+		int i;
+
+		for(i = 0; i < len; i += 4, addr += 4, data++) {
+			*data = cpu_to_be32(
+				MOCA_RD((uintptr_t)priv->base + addr));
+			//printk("MoCA READ: AD 0x%x  = 0x%x (0x%x)\n", (priv->base + addr), MOCA_RD((uintptr_t)priv->base + addr), *data);
+		 }
+#endif
+	}
+	mutex_unlock(&priv->copy_mutex);
+
+	dma_unmap_sg(&priv->pdev->dev, sg, nents, DMA_FROM_DEVICE);
+}
+#endif
+
+static void moca_read_mac_addr(struct moca_priv_data *priv, uint32_t *hi, uint32_t *lo)
+{
+	struct net_device *pdev;
+	char mocaName[IFNAMSIZ];
+
+	if (priv == NULL)
+		snprintf(mocaName, sizeof(mocaName), "moca%u", 0);
+	else
+		snprintf(mocaName, sizeof(mocaName), "moca%u",
+			((struct moca_platform_data *)priv->pdev->dev.platform_data)->devId);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+	pdev = dev_get_by_name(&init_net, mocaName);
+#else
+	pdev = dev_get_by_name(mocaName);
+#endif
+
+	if ((pdev != NULL) && (lo != NULL) && (hi != NULL))
+		mac_to_u32(hi, lo, pdev->dev_addr);
+
+	/* dev_get_by_name() takes a reference on the device; drop it */
+	if (pdev != NULL)
+		dev_put(pdev);
+}
+
+
+#if defined(DSL_MOCA)
+
+/*
+ * This helper function was added to allow the enet driver to compile in
+ * consumer environment for 68xx profiles.
+ */
+void moca_get_fc_bits(void * arg, unsigned long *moca_fc_reg)
+{
+	struct moca_priv_data *     priv;
+	struct moca_platform_data * pMocaData;
+	unsigned long               flags;
+
+	if (arg == NULL) {
+		return;
+	}
+
+	priv = (struct moca_priv_data *) arg;
+	pMocaData = (struct moca_platform_data *)priv->pdev->dev.platform_data;
+
+	*moca_fc_reg = 0;
+	if (priv != NULL)
+	{
+		/* We can't read moca core regs unless the core's clocks are on. */
+		spin_lock_irqsave(&priv->clock_lock, flags);
+		if (priv->running) {
+			*moca_fc_reg = MOCA_RD(priv->base+priv->regs->sideband_gmii_fc_offset);
+		}
+		spin_unlock_irqrestore(&priv->clock_lock, flags);
+	}
+}
+
+#endif /* DSL_MOCA */
+
+static int __devinit bmoca_spi_probe(struct spi_device *spi) {
+	// TODO(apenwarr): match one spi device to one moca device struct.
+	// I happen to know that right now the system only registers one of
+	// moca_lan or moca_wan, never both, and there is never more than
+	// one moca chip present on our systems, so this is okay for now.
+	uint32_t val = kerSysBcmSpiSlaveReadReg32(spi, 0x10404000);
+	pr_info("bmoca_spi_probe bus=%d chip_select=%d: id=%08x %s\n",
+		spi->master->bus_num, spi->chip_select, val,
+		val != 0 ? "yes" : "no");
+	if (val == 0) return -ENODEV;
+	moca_lan_data.spi = spi;
+	moca_wan_data.spi = spi;
+	return 0; // success
+}
+
+static int __devexit bmoca_spi_remove(struct spi_device *spi) {
+	pr_info("bmoca_spi_remove\n");
+	if (moca_lan_data.spi == spi) moca_lan_data.spi = NULL;
+	if (moca_wan_data.spi == spi) moca_wan_data.spi = NULL;
+	return 0; // success
+}
+
+static struct spi_driver bmoca_spi_driver = {
+  .driver = {
+    .name = "bmoca",
+    .owner = THIS_MODULE,
+  },
+  .probe = bmoca_spi_probe,
+  .remove = __devexit_p(bmoca_spi_remove),
+};
+
+//extern void bcmenet_register_moca_fc_bits_cb(void cb(void *, unsigned long *), int isWan, void * arg);
+
+static int  hw_specific_init( struct moca_priv_data *priv )
+{
+#ifdef DSL_MOCA
+	struct moca_platform_data *pMocaData;
+
+	pMocaData = (struct moca_platform_data *)priv->pdev->dev.platform_data;
+
+	/* fill in the hw_rev field */
+	pMocaData->chip_id = MOCA_RD(0x10404004) + 0xA0;
+	pr_info("read moca chip id: %08x\n", pMocaData->chip_id);
+
+	pMocaData->hw_rev = HWREV_MOCA_20_GEN22;
+
+	/* Power down all LEAP memories */
+	MOCA_WR(0x101000e4, 0x6); // CLKGEN_LEAP_TOP_INST_DATA   
+	MOCA_WR(0x101000e8, 0x6); // CLKGEN_LEAP_TOP_INST_HAB 
+	MOCA_WR(0x101000ec, 0x6); // CLKGEN_LEAP_TOP_INST_PROG0
+	MOCA_WR(0x101000f0, 0x6); // CLKGEN_LEAP_TOP_INST_PROG1   
+	MOCA_WR(0x101000f4, 0x6); // CLKGEN_LEAP_TOP_INST_PROG2  
+	MOCA_WR(0x101000f8, 0x6); // CLKGEN_LEAP_TOP_INST_ROM
+	MOCA_WR(0x101000fc, 0x6); // CLKGEN_LEAP_TOP_INST_SHARED  
+	MOCA_WR(0x10100164, 0x3); // CLKGEN_SYS_CTRL_INST_POWER_SWITCH_MEMORY 
+
+//	bcmenet_register_moca_fc_bits_cb(
+//		moca_get_fc_bits, pMocaData->use_spi ? 1 : 0, (void *)priv);
+#endif
+
+	return 0;
+}
+
+static int moca_platform_dev_register(void)
+{
+	struct moca_platform_data *pMocaData;
+	struct platform_device *pPlatformDev;
+	BP_MOCA_INFO mocaInfo[BP_MOCA_MAX_NUM];
+	int mocaChipNum = BP_MOCA_MAX_NUM;
+	int i;
+	int ret = 0;   
+
+	BpGetMocaInfo(mocaInfo, &mocaChipNum);
+
+	ret = spi_register_driver(&bmoca_spi_driver);
+	if (ret < 0) return ret;
+
+	for (i = 0; i < mocaChipNum; i++) {
+		switch (mocaInfo[i].type) {
+			case BP_MOCA_TYPE_WAN:
+				pMocaData = &moca_wan_data;
+				pPlatformDev = &moca_wan_plat_dev;
+				break;
+
+			case BP_MOCA_TYPE_LAN:
+				pMocaData = &moca_lan_data;
+				pPlatformDev = &moca_lan_plat_dev;
+				break;
+
+			default:
+				printk(KERN_ERR "bmoca: unrecognized MoCA type %d\n",
+					mocaInfo[i].type);
+				spi_unregister_driver(&bmoca_spi_driver);
+				return -EINVAL;
+		}
+
+		ret = platform_device_register(pPlatformDev);
+		if (ret < 0) {
+			spi_unregister_driver(&bmoca_spi_driver);
+			return(ret);
+		}
+		else {
+			pMocaData->devId = i;
+
+			/* Map the board params RF Band to the bmoca.h value */
+			switch (mocaInfo[i].rfBand)
+			{
+				case BP_MOCA_RF_BAND_D_LOW:
+					pMocaData->rf_band = MOCA_BAND_D_LOW;
+					break;
+				case BP_MOCA_RF_BAND_D_HIGH:
+					pMocaData->rf_band = MOCA_BAND_D_HIGH;
+					break;
+				case BP_MOCA_RF_BAND_EXT_D:
+					pMocaData->rf_band = MOCA_BAND_EXT_D;
+					break;
+				case BP_MOCA_RF_BAND_E:
+					pMocaData->rf_band = MOCA_BAND_E;
+					break;
+				case BP_MOCA_RF_BAND_F:    
+					pMocaData->rf_band = MOCA_BAND_F;
+					break;
+				default:
+					/* Do nothing */
+					break;
+			}
+			printk(KERN_INFO "bmoca: Found MoCA device %d/%d  RF Band %d\n",
+				i, mocaChipNum, mocaInfo[i].rfBand);
+		}
+	}
+
+	return(ret);
+}
+
+static void moca_platform_dev_unregister(void)
+{
+	spi_unregister_driver(&bmoca_spi_driver);
+
+	if (moca_lan_data.devId != MOCA_DEVICE_ID_UNREGISTERED)
+		platform_device_unregister(&moca_lan_plat_dev);
+
+	if (moca_wan_data.devId != MOCA_DEVICE_ID_UNREGISTERED)
+		platform_device_unregister(&moca_wan_plat_dev);
+}
+
+static void moca_3450_write(struct moca_priv_data *priv, u8 addr, u32 data)
+{
+	/* comment out for now. We don't use i2c on the 63268BHR board */
+#ifdef MOCA_3450_USE_I2C
+	if (((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0)
+		bcm3450_write_reg(addr, data);
+	else
+#endif
+		moca_3450_write_i2c(priv, addr, data);
+}
+
+static u32 moca_3450_read(struct moca_priv_data *priv, u8 addr)
+{
+	/* comment out for now. We don't use i2c on the 63268BHR board */
+#ifdef MOCA_3450_USE_I2C
+	if (((struct moca_platform_data *)priv->pdev->dev.platform_data)->use_spi == 0)
+		return(bcm3450_read_reg(addr));
+	else
+#endif
+		return(moca_3450_read_i2c(priv, addr));
+}
+
+/*
+ * PM STUBS
+ */
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+	return NULL;
+}
+
+int clk_enable(struct clk *clk)
+{
+
+	return 0;
+}
+
+void clk_disable(struct clk *clk)
+{
+}
+
+void clk_put(struct clk *clk)
+{
+}
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	return 0;
+}
+
diff --git a/bmoca.c b/bmoca.c
new file mode 120000
index 0000000..621a8ab
--- /dev/null
+++ b/bmoca.c
@@ -0,0 +1 @@
+3.3/bmoca.c
\ No newline at end of file
diff --git a/bmoca.h b/bmoca.h
new file mode 100644
index 0000000..0c238f5
--- /dev/null
+++ b/bmoca.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _BMOCA_H_
+#define _BMOCA_H_
+
+#include <linux/if.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* NOTE: These need to match what is defined in the API template */
+#define MOCA_IE_DRV_PRINTF	0xff00
+#define MOCA_IE_WDT		0xff01
+
+#define MOCA_BAND_HIGHRF	0
+#define MOCA_BAND_MIDRF		1
+#define MOCA_BAND_WANRF		2
+#define MOCA_BAND_EXT_D		3
+#define MOCA_BAND_D_LOW		4
+#define MOCA_BAND_D_HIGH	5
+#define MOCA_BAND_E		6
+#define MOCA_BAND_F		7
+#define MOCA_BAND_G		8
+#define MOCA_BAND_H		9
+#define MOCA_BAND_MAX		10
+
+#define MOCA_BAND_NAMES { \
+	"highrf", "midrf", "wanrf", \
+	"ext_d", "d_low", "d_high", \
+	"e", "f", "g", "h", \
+}
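+
+/* Usage sketch (illustrative, not part of the vendor header): MOCA_BAND_NAMES
+ * can initialize a name table indexed by the MOCA_BAND_* values, e.g.:
+ *
+ *	static const char * const band_names[MOCA_BAND_MAX] = MOCA_BAND_NAMES;
+ *	pr_info("rf_band: %s\n", band_names[MOCA_BAND_E]);
+ */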
+
+#define MOCA_BOOT_FLAGS_BONDED	(1 << 0)
+
+#define MOCA_IOC_MAGIC		'M'
+
+#define MOCA_IOCTL_GET_DRV_INFO_V2	_IOR(MOCA_IOC_MAGIC, 0, \
+	struct moca_kdrv_info_v2)
+
+#define MOCA_IOCTL_START	_IOW(MOCA_IOC_MAGIC, 1, struct moca_start)
+#define MOCA_IOCTL_STOP		_IO(MOCA_IOC_MAGIC, 2)
+#define MOCA_IOCTL_READMEM	_IOR(MOCA_IOC_MAGIC, 3, struct moca_xfer)
+#define MOCA_IOCTL_WRITEMEM	_IOR(MOCA_IOC_MAGIC, 4, struct moca_xfer)
+
+#define MOCA_IOCTL_CHECK_FOR_DATA	_IOR(MOCA_IOC_MAGIC, 5, int)
+#define MOCA_IOCTL_WOL		_IOW(MOCA_IOC_MAGIC, 6, int)
+#define MOCA_IOCTL_GET_DRV_INFO	_IOR(MOCA_IOC_MAGIC, 0, struct moca_kdrv_info)
+#define MOCA_IOCTL_SET_CPU_RATE	_IOR(MOCA_IOC_MAGIC, 7, unsigned int)
+#define MOCA_IOCTL_SET_PHY_RATE	_IOR(MOCA_IOC_MAGIC, 8, unsigned int)
+#define MOCA_IOCTL_GET_3450_REG	_IOR(MOCA_IOC_MAGIC, 9, unsigned int)
+#define MOCA_IOCTL_SET_3450_REG	_IOR(MOCA_IOC_MAGIC, 10, unsigned int)
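+
+/* Userspace usage sketch (illustrative, not part of the vendor header); the
+ * device node name "/dev/bmoca0" is an assumption, not taken from this patch:
+ *
+ *	struct moca_kdrv_info info;
+ *	int fd = open("/dev/bmoca0", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, MOCA_IOCTL_GET_DRV_INFO, &info) == 0)
+ *		printf("hw_rev 0x%x rf_band %u\n", info.hw_rev, info.rf_band);
+ */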
+
+#define MOCA_DEVICE_ID_UNREGISTERED  (-1)
+
+/* this must match MoCAOS_IFNAMSIZE */
+#define MOCA_IFNAMSIZ		16
+
+/* ID value hinting ioctl caller to use returned IFNAME as is */
+#define MOCA_IFNAME_USE_ID    0xffffffff
+
+/* Legacy version of moca_kdrv_info */
+struct moca_kdrv_info_v2 {
+	__u32			version;
+	__u32			build_number;
+	__u32			builtin_fw;
+
+	__u32			hw_rev;
+	__u32			rf_band;
+
+	__u32			uptime;
+	__s32			refcount;
+	__u32			gp1;
+
+	__s8			enet_name[MOCA_IFNAMSIZ];
+	__u32			enet_id;
+
+	__u32			macaddr_hi;
+	__u32			macaddr_lo;
+
+	__u32			phy_freq;
+	__u32			device_id;
+};
+
+/* this must match MoCAOS_DrvInfo */
+struct moca_kdrv_info {
+	__u32			version;
+	__u32			build_number;
+	__u32			builtin_fw;
+
+	__u32			hw_rev;
+	__u32			rf_band;
+
+	__u32			uptime;
+	__s32			refcount;
+	__u32			gp1;
+
+	__s8			enet_name[MOCA_IFNAMSIZ];
+	__u32			enet_id;
+
+	__u32			macaddr_hi;
+	__u32			macaddr_lo;
+
+	__u32			phy_freq;
+	__u32			device_id;
+
+	__u32			chip_id;
+};
+
+struct moca_xfer {
+	__u64			buf;
+	__u32			len;
+	__u32			moca_addr;
+};
+
+struct moca_start {
+	struct moca_xfer	x;
+	__u32			boot_flags;
+};
+
+#ifdef __KERNEL__
+
+static inline void mac_to_u32(uint32_t *hi, uint32_t *lo, const uint8_t *mac)
+{
+	*hi = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | (mac[3] << 0);
+	*lo = (mac[4] << 24) | (mac[5] << 16);
+}
+
+static inline void u32_to_mac(uint8_t *mac, uint32_t hi, uint32_t lo)
+{
+	mac[0] = (hi >> 24) & 0xff;
+	mac[1] = (hi >> 16) & 0xff;
+	mac[2] = (hi >>  8) & 0xff;
+	mac[3] = (hi >>  0) & 0xff;
+	mac[4] = (lo >> 24) & 0xff;
+	mac[5] = (lo >> 16) & 0xff;
+}
+
+struct moca_platform_data {
+	char			enet_name[IFNAMSIZ];
+	unsigned int		enet_id;
+
+	u32			macaddr_hi;
+	u32			macaddr_lo;
+
+	phys_addr_t		bcm3450_i2c_base;
+	int			bcm3450_i2c_addr;
+
+	u32			hw_rev;  /* this is the chip_id */
+	u32			rf_band;
+
+	int			use_dma;
+	int			use_spi;
+	int			devId;
+	struct spi_device	*spi;
+
+	u32			chip_id;
+
+#ifdef CONFIG_SMP
+	int			smp_processor_id;
+#endif
+};
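+
+/*
+ * Illustrative helper (not part of the vendor header): board code holding a
+ * 6-byte MAC can pack it into the macaddr_hi/macaddr_lo pair above with the
+ * mac_to_u32() helper.
+ */
+static inline void moca_pdata_set_mac(struct moca_platform_data *pd,
+	const uint8_t *mac)
+{
+	mac_to_u32(&pd->macaddr_hi, &pd->macaddr_lo, mac);
+}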
+
+enum {
+	HWREV_MOCA_11		= 0x1100,
+	HWREV_MOCA_11_LITE	= 0x1101,
+	HWREV_MOCA_11_PLUS	= 0x1102,
+	HWREV_MOCA_20_ALT	= 0x2000, /* for backward compatibility */
+	HWREV_MOCA_20_GEN21	= 0x2001,
+	HWREV_MOCA_20_GEN22	= 0x2002,
+	HWREV_MOCA_20_GEN23	= 0x2003,
+};
+
+#define MOCA_PROTVER_11		0x1100
+#define MOCA_PROTVER_20		0x2000
+#define MOCA_PROTVER_MASK	0xff00
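+
+/*
+ * Illustrative check (not part of the vendor header): the HWREV_MOCA_* values
+ * encode the MoCA generation in their upper byte, so a hw_rev value can be
+ * classified with the mask above.
+ */
+static inline int moca_hwrev_is_20(u32 hw_rev)
+{
+	return (hw_rev & MOCA_PROTVER_MASK) == MOCA_PROTVER_20;
+}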
+
+#endif /* __KERNEL__ */
+
+#endif /* ! _BMOCA_H_ */
diff --git a/linux b/linux
new file mode 120000
index 0000000..945c9b4
--- /dev/null
+++ b/linux
@@ -0,0 +1 @@
+.
\ No newline at end of file