Snapshot of kernel 2.6.32.11.

- cd $(BUILDROOT)/../kernel
- git clone git://git.kernel.org/pub/scm/linux/kernel/git/stable/\
linux-stable.git linux
- cd linux
- git checkout -b v2.6.32.11 v2.6.32.11
- cp -prf * ../prism/
- cd ../prism
- rm -rf ../linux
- git add -Af .
- git commit

Change-Id: I9e84aaa6fdc9fa5ddab728d3f9864f04548df344
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
new file mode 100644
index 0000000..55c9c59
--- /dev/null
+++ b/drivers/edac/Kconfig
@@ -0,0 +1,272 @@
+#
+#	EDAC Kconfig
+#	Copyright (c) 2008 Doug Thompson www.softwarebitmaker.com
+#	Licensed and distributed under the GPL
+#
+
+menuconfig EDAC
+	bool "EDAC (Error Detection And Correction) reporting"
+	depends on HAS_IOMEM
+	depends on X86 || PPC
+	help
+	  EDAC is designed to report errors in the core system.
+	  These are low-level errors that are reported in the CPU or
+	  supporting chipset or other subsystems:
+	  memory errors, cache errors, PCI errors, thermal throttling, etc.
+	  If unsure, select 'Y'.
+
+	  If this code is reporting problems on your system, please
+	  see the EDAC project web pages for more information at:
+
+	  <http://bluesmoke.sourceforge.net/>
+
+	  and:
+
+	  <http://buttersideup.com/edacwiki>
+
+	  There is also a mailing list for the EDAC project, which can
+	  be found via the sourceforge page.
+
+if EDAC
+
+comment "Reporting subsystems"
+
+config EDAC_DEBUG
+	bool "Debugging"
+	help
+	  This turns on debugging information for the entire EDAC
+	  sub-system. You can insert the module with "debug_level=x";
+	  currently there are four debug levels (x=0,1,2,3, from low to high).
+	  Usually you should select 'N'.
+
+config EDAC_DEBUG_VERBOSE
+	bool "More verbose debugging"
+	depends on EDAC_DEBUG
+	help
+	  This option makes debugging information more verbose.
+	  The source file name and line number where the debugging
+	  message is printed will be added to each debugging message.
+
+config EDAC_DECODE_MCE
+	tristate "Decode MCEs in human-readable form (only on AMD for now)"
+	depends on CPU_SUP_AMD && X86_MCE
+	default y
+	---help---
+	  Enable this option if you want to decode Machine Check Exceptions
+	  occurring on your machine in human-readable form.
+
+	  You should definitely say Y here in case you want to decode MCEs
+	  which occur really early upon boot, before the module infrastructure
+	  has been initialized.
+
+config EDAC_MM_EDAC
+	tristate "Main Memory EDAC (Error Detection And Correction) reporting"
+	help
+	  Some systems are able to detect and correct errors in main
+	  memory.  EDAC can report statistics on memory error
+	  detection and correction (EDAC - commonly referred to as ECC
+	  errors).  EDAC will also try to decode where these errors
+	  occurred so that a particular failing memory module can be
+	  replaced.  If unsure, select 'Y'.
+
+config EDAC_AMD64
+	tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
+	depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE
+	help
+	  Support for error detection and correction on the AMD 64
+	  Families of Memory Controllers (K8, F10h and F11h)
+
+config EDAC_AMD64_ERROR_INJECTION
+	bool "Sysfs Error Injection facilities"
+	depends on EDAC_AMD64
+	help
+	  Recent Opterons (Family 10h and later) provide for Memory Error
+	  Injection into the ECC detection circuits. The amd64_edac module
+	  allows the operator/user to inject Uncorrectable and Correctable
+	  errors into DRAM.
+
+	  When enabled, in each of the respective memory controller directories
+	  (/sys/devices/system/edac/mc/mcX), there are 3 input files:
+
+	  - inject_section (0..3, 16-byte section of 64-byte cacheline),
+	  - inject_word (0..8, 16-bit word of 16-byte section),
+	  - inject_ecc_vector (hex ecc vector: select bits of inject word)
+
+	  In addition, there are two control files, inject_read and inject_write,
+	  which trigger the DRAM ECC Read and Write respectively.
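+
+	  A minimal usage sketch (illustrative values within the ranges
+	  above, assuming the first controller, mc0):
+
+	    cd /sys/devices/system/edac/mc/mc0
+	    echo 1 > inject_section
+	    echo 4 > inject_word
+	    echo 0x1 > inject_ecc_vector
+	    echo 1 > inject_write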
+
+config EDAC_AMD76X
+	tristate "AMD 76x (760, 762, 768)"
+	depends on EDAC_MM_EDAC && PCI && X86_32
+	help
+	  Support for error detection and correction on the AMD 76x
+	  series of chipsets used with the Athlon processor.
+
+config EDAC_E7XXX
+	tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
+	depends on EDAC_MM_EDAC && PCI && X86_32
+	help
+	  Support for error detection and correction on the Intel
+	  E7205, E7500, E7501 and E7505 server chipsets.
+
+config EDAC_E752X
+	tristate "Intel e752x (e7520, e7525, e7320) and 3100"
+	depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG
+	help
+	  Support for error detection and correction on the Intel
+	  E7520, E7525, E7320 server chipsets.
+
+config EDAC_I82443BXGX
+	tristate "Intel 82443BX/GX (440BX/GX)"
+	depends on EDAC_MM_EDAC && PCI && X86_32
+	depends on BROKEN
+	help
+	  Support for error detection and correction on the Intel
+	  82443BX/GX memory controllers (440BX/GX chipsets).
+
+config EDAC_I82875P
+	tristate "Intel 82875p (D82875P, E7210)"
+	depends on EDAC_MM_EDAC && PCI && X86_32
+	help
+	  Support for error detection and correction on the Intel
+	  D82875P and E7210 server chipsets.
+
+config EDAC_I82975X
+	tristate "Intel 82975x (D82975x)"
+	depends on EDAC_MM_EDAC && PCI && X86
+	help
+	  Support for error detection and correction on the Intel
+	  D82975x server chipsets.
+
+config EDAC_I3000
+	tristate "Intel 3000/3010"
+	depends on EDAC_MM_EDAC && PCI && X86
+	help
+	  Support for error detection and correction on the Intel
+	  3000 and 3010 server chipsets.
+
+config EDAC_I3200
+	tristate "Intel 3200"
+	depends on EDAC_MM_EDAC && PCI && X86 && EXPERIMENTAL
+	help
+	  Support for error detection and correction on the Intel
+	  3200 and 3210 server chipsets.
+
+config EDAC_X38
+	tristate "Intel X38"
+	depends on EDAC_MM_EDAC && PCI && X86
+	help
+	  Support for error detection and correction on the Intel
+	  X38 server chipsets.
+
+config EDAC_I5400
+	tristate "Intel 5400 (Seaburg) chipsets"
+	depends on EDAC_MM_EDAC && PCI && X86
+	help
+	  Support for error detection and correction on the Intel
+	  i5400 MCH chipset (Seaburg).
+
+config EDAC_I82860
+	tristate "Intel 82860"
+	depends on EDAC_MM_EDAC && PCI && X86_32
+	help
+	  Support for error detection and correction on the Intel
+	  82860 chipset.
+
+config EDAC_R82600
+	tristate "Radisys 82600 embedded chipset"
+	depends on EDAC_MM_EDAC && PCI && X86_32
+	help
+	  Support for error detection and correction on the Radisys
+	  82600 embedded chipset.
+
+config EDAC_I5000
+	tristate "Intel Greencreek/Blackford chipset"
+	depends on EDAC_MM_EDAC && X86 && PCI
+	help
+	  Support for error detection and correction on the Intel
+	  Greencreek/Blackford chipsets.
+
+config EDAC_I5100
+	tristate "Intel San Clemente MCH"
+	depends on EDAC_MM_EDAC && X86 && PCI
+	help
+	  Support for error detection and correction on the Intel
+	  San Clemente MCH.
+
+config EDAC_MPC85XX
+	tristate "Freescale MPC83xx / MPC85xx"
+	depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || MPC85xx)
+	help
+	  Support for error detection and correction on the Freescale
+	  MPC8349, MPC8560, MPC8540 and MPC8548.
+
+config EDAC_MV64X60
+	tristate "Marvell MV64x60"
+	depends on EDAC_MM_EDAC && MV64X60
+	help
+	  Support for error detection and correction on the Marvell
+	  MV64360 and MV64460 chipsets.
+
+config EDAC_PASEMI
+	tristate "PA Semi PWRficient"
+	depends on EDAC_MM_EDAC && PCI
+	depends on PPC_PASEMI
+	help
+	  Support for error detection and correction on PA Semi
+	  PWRficient.
+
+config EDAC_CELL
+	tristate "Cell Broadband Engine memory controller"
+	depends on EDAC_MM_EDAC && PPC_CELL_COMMON
+	help
+	  Support for error detection and correction on the
+	  Cell Broadband Engine internal memory controller
+	  on platforms without a hypervisor.
+
+config EDAC_PPC4XX
+	tristate "PPC4xx IBM DDR2 Memory Controller"
+	depends on EDAC_MM_EDAC && 4xx
+	help
+	  This enables support for EDAC on the ECC memory used
+	  with the IBM DDR2 memory controller found in various
+	  PowerPC 4xx embedded processors such as the 405EX[r],
+	  440SP, 440SPe, 460EX, 460GT and 460SX.
+
+config EDAC_AMD8131
+	tristate "AMD8131 HyperTransport PCI-X Tunnel"
+	depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
+	help
+	  Support for error detection and correction on the
+	  AMD8131 HyperTransport PCI-X Tunnel chip.
+	  Note: add more Kconfig dependencies if it is adopted
+	  on machines other than Maple.
+
+config EDAC_AMD8111
+	tristate "AMD8111 HyperTransport I/O Hub"
+	depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
+	help
+	  Support for error detection and correction on the
+	  AMD8111 HyperTransport I/O Hub chip.
+	  Note: add more Kconfig dependencies if it is adopted
+	  on machines other than Maple.
+
+config EDAC_CPC925
+	tristate "IBM CPC925 Memory Controller (PPC970FX)"
+	depends on EDAC_MM_EDAC && PPC64
+	help
+	  Support for error detection and correction on the
+	  IBM CPC925 Bridge and Memory Controller, which is
+	  a companion chip to the PowerPC 970 family of
+	  processors.
+
+endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
new file mode 100644
index 0000000..bc5dc23
--- /dev/null
+++ b/drivers/edac/Makefile
@@ -0,0 +1,50 @@
+#
+# Makefile for the Linux kernel EDAC drivers.
+#
+# Copyright 02 Jul 2003, Linux Networx (http://lnxi.com)
+# This file may be distributed under the terms of the
+# GNU General Public License.
+#
+
+obj-$(CONFIG_EDAC)			:= edac_stub.o
+obj-$(CONFIG_EDAC_MM_EDAC)		+= edac_core.o
+
+edac_core-objs	:= edac_mc.o edac_device.o edac_mc_sysfs.o
+edac_core-objs	+= edac_module.o edac_device_sysfs.o
+
+ifdef CONFIG_PCI
+edac_core-objs	+= edac_pci.o edac_pci_sysfs.o
+endif
+
+obj-$(CONFIG_EDAC_DECODE_MCE)		+= edac_mce_amd.o
+
+obj-$(CONFIG_EDAC_AMD76X)		+= amd76x_edac.o
+obj-$(CONFIG_EDAC_CPC925)		+= cpc925_edac.o
+obj-$(CONFIG_EDAC_I5000)		+= i5000_edac.o
+obj-$(CONFIG_EDAC_I5100)		+= i5100_edac.o
+obj-$(CONFIG_EDAC_I5400)		+= i5400_edac.o
+obj-$(CONFIG_EDAC_E7XXX)		+= e7xxx_edac.o
+obj-$(CONFIG_EDAC_E752X)		+= e752x_edac.o
+obj-$(CONFIG_EDAC_I82443BXGX)		+= i82443bxgx_edac.o
+obj-$(CONFIG_EDAC_I82875P)		+= i82875p_edac.o
+obj-$(CONFIG_EDAC_I82975X)		+= i82975x_edac.o
+obj-$(CONFIG_EDAC_I3000)		+= i3000_edac.o
+obj-$(CONFIG_EDAC_I3200)		+= i3200_edac.o
+obj-$(CONFIG_EDAC_X38)			+= x38_edac.o
+obj-$(CONFIG_EDAC_I82860)		+= i82860_edac.o
+obj-$(CONFIG_EDAC_R82600)		+= r82600_edac.o
+
+amd64_edac_mod-y := amd64_edac.o
+amd64_edac_mod-$(CONFIG_EDAC_DEBUG) += amd64_edac_dbg.o
+amd64_edac_mod-$(CONFIG_EDAC_AMD64_ERROR_INJECTION) += amd64_edac_inj.o
+
+obj-$(CONFIG_EDAC_AMD64)		+= amd64_edac_mod.o
+
+obj-$(CONFIG_EDAC_PASEMI)		+= pasemi_edac.o
+obj-$(CONFIG_EDAC_MPC85XX)		+= mpc85xx_edac.o
+obj-$(CONFIG_EDAC_MV64X60)		+= mv64x60_edac.o
+obj-$(CONFIG_EDAC_CELL)			+= cell_edac.o
+obj-$(CONFIG_EDAC_PPC4XX)		+= ppc4xx_edac.o
+obj-$(CONFIG_EDAC_AMD8111)		+= amd8111_edac.o
+obj-$(CONFIG_EDAC_AMD8131)		+= amd8131_edac.o
+
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
new file mode 100644
index 0000000..01bc8e2
--- /dev/null
+++ b/drivers/edac/amd64_edac.c
@@ -0,0 +1,3259 @@
+#include "amd64_edac.h"
+#include <asm/k8.h>
+
+static struct edac_pci_ctl_info *amd64_ctl_pci;
+
+static int report_gart_errors;
+module_param(report_gart_errors, int, 0644);
+
+/*
+ * Set by command line parameter. If BIOS has enabled the ECC, this override is
+ * cleared to prevent re-enabling the hardware by this driver.
+ */
+static int ecc_enable_override;
+module_param(ecc_enable_override, int, 0644);
+
+static struct msr *msrs;
+
+/* Lookup table for all possible MC control instances */
+struct amd64_pvt;
+static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
+static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
+
+/*
+ * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
+ * for DDR2 DRAM mapping.
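+ *
+ * Worked example (values from the table below): a DBAM nibble of 0111b
+ * selects shift 31, i.e. a 2gb chip select; the number of pages derived
+ * from it later is 1 << (31 - PAGE_SHIFT) == 524288 with 4K pages.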
+ */
+u32 revf_quad_ddr2_shift[] = {
+	0,	/* 0000b NULL DIMM (128mb) */
+	28,	/* 0001b 256mb */
+	29,	/* 0010b 512mb */
+	29,	/* 0011b 512mb */
+	29,	/* 0100b 512mb */
+	30,	/* 0101b 1gb */
+	30,	/* 0110b 1gb */
+	31,	/* 0111b 2gb */
+	31,	/* 1000b 2gb */
+	32,	/* 1001b 4gb */
+	32,	/* 1010b 4gb */
+	33,	/* 1011b 8gb */
+	0,	/* 1100b future */
+	0,	/* 1101b future */
+	0,	/* 1110b future */
+	0	/* 1111b future */
+};
+
+/*
+ * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
+ * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
+ * or higher value'.
+ *
+ * FIXME: Produce a better mapping/linearisation.
+ */
+
+struct scrubrate scrubrates[] = {
+	{ 0x01, 1600000000UL},
+	{ 0x02, 800000000UL},
+	{ 0x03, 400000000UL},
+	{ 0x04, 200000000UL},
+	{ 0x05, 100000000UL},
+	{ 0x06, 50000000UL},
+	{ 0x07, 25000000UL},
+	{ 0x08, 12284069UL},
+	{ 0x09, 6274509UL},
+	{ 0x0A, 3121951UL},
+	{ 0x0B, 1560975UL},
+	{ 0x0C, 781440UL},
+	{ 0x0D, 390720UL},
+	{ 0x0E, 195300UL},
+	{ 0x0F, 97650UL},
+	{ 0x10, 48854UL},
+	{ 0x11, 24427UL},
+	{ 0x12, 12213UL},
+	{ 0x13, 6101UL},
+	{ 0x14, 3051UL},
+	{ 0x15, 1523UL},
+	{ 0x16, 761UL},
+	{ 0x00, 0UL},        /* scrubbing off */
+};
+
+/*
+ * Memory scrubber control interface. For K8, memory scrubbing is handled by
+ * hardware and can involve L2 cache, dcache as well as the main memory. With
+ * F10, this is extended to L3 cache scrubbing on CPU models sporting that
+ * functionality.
+ *
+ * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
+ * (dram) over to cache lines. This is nasty, so we will use bandwidth in
+ * bytes/sec for the setting.
+ *
+ * Currently, we only do dram scrubbing. If the scrubbing is done in software on
+ * other archs, we might not have access to the caches directly.
+ */
+
+/*
+ * scan the scrub rate mapping table for a close or matching bandwidth value to
+ * issue. If the requested rate is too big, use the last maximum value found.
+ */
+static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
+				       u32 min_scrubrate)
+{
+	u32 scrubval;
+	int i;
+
+	/*
+	 * map the configured rate (new_bw) to a value specific to the AMD64
+	 * memory controller and apply to register. Search for the first
+	 * bandwidth entry that is greater or equal than the setting requested
+	 * and program that. If at last entry, turn off DRAM scrubbing.
+	 */
+	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
+		/*
+		 * skip scrub rates which aren't recommended
+		 * (see F10 BKDG, F3x58)
+		 */
+		if (scrubrates[i].scrubval < min_scrubrate)
+			continue;
+
+		if (scrubrates[i].bandwidth <= new_bw)
+			break;
+
+		/*
+		 * if no suitable bandwidth found, turn off DRAM scrubbing
+		 * entirely by falling back to the last element in the
+		 * scrubrates array.
+		 */
+	}
+
+	scrubval = scrubrates[i].scrubval;
+	if (scrubval)
+		edac_printk(KERN_DEBUG, EDAC_MC,
+			    "Setting scrub rate bandwidth: %u\n",
+			    scrubrates[i].bandwidth);
+	else
+		edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
+
+	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
+
+	return 0;
+}
+
+static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u32 min_scrubrate = 0x0;
+
+	switch (boot_cpu_data.x86) {
+	case 0xf:
+		min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
+		break;
+	case 0x10:
+		min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
+		break;
+	case 0x11:
+		min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
+		break;
+
+	default:
+		amd64_printk(KERN_ERR, "Unsupported family!\n");
+		break;
+	}
+	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
+			min_scrubrate);
+}
+
+static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u32 scrubval = 0;
+	int status = -1, i, ret = 0;
+
+	ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
+	if (ret)
+		debugf0("Reading K8_SCRCTRL failed\n");
+
+	scrubval = scrubval & 0x001F;
+
+	edac_printk(KERN_DEBUG, EDAC_MC,
+		    "pci-read, sdram scrub control value: %d\n", scrubval);
+
+	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
+		if (scrubrates[i].scrubval == scrubval) {
+			*bw = scrubrates[i].bandwidth;
+			status = 0;
+			break;
+		}
+	}
+
+	return status;
+}
+
+/* Map from a CSROW entry to the mask entry that operates on it */
+static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
+{
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
+		return csrow;
+	else
+		return csrow >> 1;
+}
+
+/* Return the 'base' address of the i'th CS entry of the 'dct' DRAM controller */
+static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
+{
+	if (dct == 0)
+		return pvt->dcsb0[csrow];
+	else
+		return pvt->dcsb1[csrow];
+}
+
+/*
+ * Return the 'mask' address for the i'th CS entry. This function is needed
+ * because the number of DCSM registers on Rev E and prior differs from that
+ * on Rev F and later.
+ */
+static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
+{
+	if (dct == 0)
+		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
+	else
+		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
+}
+
+
+/*
+ * In *base and *limit, pass back the full 40-bit base and limit physical
+ * addresses for the node given by node_id.  This information is obtained from
+ * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
+ * base and limit addresses are of type SysAddr, as defined at the start of
+ * section 3.4.4 (p. 70).  They are the lowest and highest physical addresses
+ * in the address range they represent.
+ */
+static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
+			       u64 *base, u64 *limit)
+{
+	*base = pvt->dram_base[node_id];
+	*limit = pvt->dram_limit[node_id];
+}
+
+/*
+ * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
+ * with node_id
+ */
+static int amd64_base_limit_match(struct amd64_pvt *pvt,
+					u64 sys_addr, int node_id)
+{
+	u64 base, limit, addr;
+
+	amd64_get_base_and_limit(pvt, node_id, &base, &limit);
+
+	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
+	 * all ones if the most significant implemented address bit is 1.
+	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
+	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
+	 * Application Programming.
+	 */
+	addr = sys_addr & 0x000000ffffffffffull;
+
+	return (addr >= base) && (addr <= limit);
+}
+
+/*
+ * Attempt to map a SysAddr to a node. On success, return a pointer to the
+ * mem_ctl_info structure for the node that the SysAddr maps to.
+ *
+ * On failure, return NULL.
+ */
+static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
+						u64 sys_addr)
+{
+	struct amd64_pvt *pvt;
+	int node_id;
+	u32 intlv_en, bits;
+
+	/*
+	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
+	 * 3.4.4.2) registers to map the SysAddr to a node ID.
+	 */
+	pvt = mci->pvt_info;
+
+	/*
+	 * The value of this field should be the same for all DRAM Base
+	 * registers.  Therefore we arbitrarily choose to read it from the
+	 * register for node 0.
+	 */
+	intlv_en = pvt->dram_IntlvEn[0];
+
+	if (intlv_en == 0) {
+		for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
+			if (amd64_base_limit_match(pvt, sys_addr, node_id))
+				goto found;
+		}
+		goto err_no_match;
+	}
+
+	if (unlikely((intlv_en != 0x01) &&
+		     (intlv_en != 0x03) &&
+		     (intlv_en != 0x07))) {
+		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
+			     "IntlvEn field of DRAM Base Register for node 0: "
+			     "this probably indicates a BIOS bug.\n", intlv_en);
+		return NULL;
+	}
+
+	bits = (((u32) sys_addr) >> 12) & intlv_en;
+
+	for (node_id = 0; ; ) {
+		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
+			break;	/* intlv_sel field matches */
+
+		if (++node_id >= DRAM_REG_COUNT)
+			goto err_no_match;
+	}
+
+	/* sanity test for sys_addr */
+	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
+		amd64_printk(KERN_WARNING,
+			     "%s(): sys_addr 0x%llx falls outside base/limit "
+			     "address range for node %d with node interleaving "
+			     "enabled.\n",
+			     __func__, sys_addr, node_id);
+		return NULL;
+	}
+
+found:
+	return edac_mc_find(node_id);
+
+err_no_match:
+	debugf2("sys_addr 0x%lx doesn't match any node\n",
+		(unsigned long)sys_addr);
+
+	return NULL;
+}
+
+/*
+ * Extract the DRAM CS base address from selected csrow register.
+ */
+static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
+{
+	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
+				pvt->dcs_shift;
+}
+
+/*
+ * Extract the mask from the dcsm0[csrow] entry in a CPU revision-specific way.
+ */
+static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
+{
+	u64 dcsm_bits, other_bits;
+	u64 mask;
+
+	/* Extract bits from DRAM CS Mask. */
+	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
+
+	other_bits = pvt->dcsm_mask;
+	other_bits = ~(other_bits << pvt->dcs_shift);
+
+	/*
+	 * The extracted bits from DCSM belong in the spaces represented by
+	 * the cleared bits in other_bits.
+	 */
+	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
+
+	return mask;
+}
+
+/*
+ * @input_addr is an InputAddr associated with the node given by mci. Return the
+ * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
+ */
+static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
+{
+	struct amd64_pvt *pvt;
+	int csrow;
+	u64 base, mask;
+
+	pvt = mci->pvt_info;
+
+	/*
+	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
+	 * base/mask register pair, test the condition shown near the start of
+	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
+	 */
+	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
+
+		/* This DRAM chip select is disabled on this node */
+		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
+			continue;
+
+		base = base_from_dct_base(pvt, csrow);
+		mask = ~mask_from_dct_mask(pvt, csrow);
+
+		if ((input_addr & mask) == (base & mask)) {
+			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
+				(unsigned long)input_addr, csrow,
+				pvt->mc_node_id);
+
+			return csrow;
+		}
+	}
+
+	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
+		(unsigned long)input_addr, pvt->mc_node_id);
+
+	return -1;
+}
+
+/*
+ * Return the base value defined by the DRAM Base register for the node
+ * represented by mci.  This function returns the full 40-bit value despite the
+ * fact that the register only stores bits 39-24 of the value. See section
+ * 3.4.4.1 (BKDG #26094, K8, revA-E)
+ */
+static inline u64 get_dram_base(struct mem_ctl_info *mci)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+
+	return pvt->dram_base[pvt->mc_node_id];
+}
+
+/*
+ * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
+ * for the node represented by mci. Info is passed back in *hole_base,
+ * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
+ * info is invalid. Info may be invalid for either of the following reasons:
+ *
+ * - The revision of the node is not E or greater.  In this case, the DRAM Hole
+ *   Address Register does not exist.
+ *
+ * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
+ *   indicating that its contents are not valid.
+ *
+ * The values passed back in *hole_base, *hole_offset, and *hole_size are
+ * complete 32-bit values despite the fact that the bitfields in the DHAR
+ * only represent bits 31-24 of the base and offset values.
+ */
+int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
+			     u64 *hole_offset, u64 *hole_size)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u64 base;
+
+	/* only revE and later have the DRAM Hole Address Register */
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
+		debugf1("  revision %d for node %d does not support DHAR\n",
+			pvt->ext_model, pvt->mc_node_id);
+		return 1;
+	}
+
+	/* only valid for Fam10h */
+	if (boot_cpu_data.x86 == 0x10 &&
+	    (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
+		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
+		return 1;
+	}
+
+	if ((pvt->dhar & DHAR_VALID) == 0) {
+		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
+			pvt->mc_node_id);
+		return 1;
+	}
+
+	/* This node has Memory Hoisting */
+
+	/* +------------------+--------------------+--------------------+-----
+	 * | memory           | DRAM hole          | relocated          |
+	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
+	 * |                  |                    | DRAM hole          |
+	 * |                  |                    | [0x100000000,      |
+	 * |                  |                    |  (0x100000000+     |
+	 * |                  |                    |   (0xffffffff-x))] |
+	 * +------------------+--------------------+--------------------+-----
+	 *
+	 * Above is a diagram of physical memory showing the DRAM hole and the
+	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
+	 * starts at address x (the base address) and extends through address
+	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
+	 * addresses in the hole so that they start at 0x100000000.
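+	 *
+	 * Worked example (illustrative numbers, not from the BKDG): with a
+	 * hole base of x = 0xd0000000, the hole spans [0xd0000000,
+	 * 0xffffffff], hole_size is 0x30000000, and the hoisted copy of the
+	 * hole appears at [0x100000000, 0x12fffffff].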
+	 */
+
+	base = dhar_base(pvt->dhar);
+
+	*hole_base = base;
+	*hole_size = (0x1ull << 32) - base;
+
+	if (boot_cpu_data.x86 > 0xf)
+		*hole_offset = f10_dhar_offset(pvt->dhar);
+	else
+		*hole_offset = k8_dhar_offset(pvt->dhar);
+
+	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
+		pvt->mc_node_id, (unsigned long)*hole_base,
+		(unsigned long)*hole_offset, (unsigned long)*hole_size);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
+
+/*
+ * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
+ * assumed that sys_addr maps to the node given by mci.
+ *
+ * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
+ * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
+ * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
+ * then it is also involved in translating a SysAddr to a DramAddr. Sections
+ * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
+ * These parts of the documentation are unclear. I interpret them as follows:
+ *
+ * When node n receives a SysAddr, it processes the SysAddr as follows:
+ *
+ * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
+ *    Limit registers for node n. If the SysAddr is not within the range
+ *    specified by the base and limit values, then node n ignores the Sysaddr
+ *    (since it does not map to node n). Otherwise continue to step 2 below.
+ *
+ * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
+ *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
+ *    the range of relocated addresses (starting at 0x100000000) from the DRAM
+ *    hole. If not, skip to step 3 below. Else get the value of the
+ *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
+ *    offset defined by this value from the SysAddr.
+ *
+ * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
+ *    Base register for node n. To obtain the DramAddr, subtract the base
+ *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
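+ *
+ * A short illustrative example of step 3 (numbers invented for clarity):
+ * if node n has DRAMBase = 0x100000000 and the DHAR does not apply, a
+ * SysAddr of 0x123456789 yields DramAddr 0x123456789 - 0x100000000 =
+ * 0x23456789.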
+ */
+static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
+{
+	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
+	int ret = 0;
+
+	dram_base = get_dram_base(mci);
+
+	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
+				      &hole_size);
+	if (!ret) {
+		if ((sys_addr >= (1ull << 32)) &&
+		    (sys_addr < ((1ull << 32) + hole_size))) {
+			/* use DHAR to translate SysAddr to DramAddr */
+			dram_addr = sys_addr - hole_offset;
+
+			debugf2("using DHAR to translate SysAddr 0x%lx to "
+				"DramAddr 0x%lx\n",
+				(unsigned long)sys_addr,
+				(unsigned long)dram_addr);
+
+			return dram_addr;
+		}
+	}
+
+	/*
+	 * Translate the SysAddr to a DramAddr as shown near the start of
+	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
+	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
+	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
+	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
+	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
+	 * Programmer's Manual Volume 1 Application Programming.
+	 */
+	dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
+
+	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
+		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
+		(unsigned long)dram_addr);
+	return dram_addr;
+}
+
+/*
+ * @intlv_en is the value of the IntlvEn field from a DRAM Base register
+ * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
+ * for node interleaving.
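+ *
+ * For example, intlv_en == 3 means 4-node interleaving: SysAddr bits
+ * [13:12] select the node, and the table below returns 2.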
+ */
+static int num_node_interleave_bits(unsigned intlv_en)
+{
+	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
+	int n;
+
+	BUG_ON(intlv_en > 7);
+	n = intlv_shift_table[intlv_en];
+	return n;
+}
+
+/* Translate the DramAddr given by @dram_addr to an InputAddr. */
+static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
+{
+	struct amd64_pvt *pvt;
+	int intlv_shift;
+	u64 input_addr;
+
+	pvt = mci->pvt_info;
+
+	/*
+	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
+	 * concerning translating a DramAddr to an InputAddr.
+	 */
+	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+	input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
+	    (dram_addr & 0xfff);
+
+	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
+		intlv_shift, (unsigned long)dram_addr,
+		(unsigned long)input_addr);
+
+	return input_addr;
+}
+
+/*
+ * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
+ * assumed that @sys_addr maps to the node given by mci.
+ */
+static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
+{
+	u64 input_addr;
+
+	input_addr =
+	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
+
+	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
+		(unsigned long)sys_addr, (unsigned long)input_addr);
+
+	return input_addr;
+}
+
+
+/*
+ * @input_addr is an InputAddr associated with the node represented by mci.
+ * Translate @input_addr to a DramAddr and return the result.
+ */
+static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
+{
+	struct amd64_pvt *pvt;
+	int node_id, intlv_shift;
+	u64 bits, dram_addr;
+	u32 intlv_sel;
+
+	/*
+	 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
+	 * shows how to translate a DramAddr to an InputAddr. Here we reverse
+	 * this procedure. When translating from a DramAddr to an InputAddr, the
+	 * bits used for node interleaving are discarded.  Here we recover these
+	 * bits from the IntlvSel field of the DRAM Limit register (section
+	 * 3.4.4.2) for the node that input_addr is associated with.
+	 */
+	pvt = mci->pvt_info;
+	node_id = pvt->mc_node_id;
+	BUG_ON((node_id < 0) || (node_id > 7));
+
+	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+
+	if (intlv_shift == 0) {
+		debugf1("    InputAddr 0x%lx translates to DramAddr of "
+			"same value\n",	(unsigned long)input_addr);
+
+		return input_addr;
+	}
+
+	bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
+	    (input_addr & 0xfff);
+
+	intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
+	dram_addr = bits + (intlv_sel << 12);
+
+	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
+		"(%d node interleave bits)\n", (unsigned long)input_addr,
+		(unsigned long)dram_addr, intlv_shift);
+
+	return dram_addr;
+}
+
+/*
+ * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
+ * @dram_addr to a SysAddr.
+ */
+static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
+	int ret = 0;
+
+	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
+				      &hole_size);
+	if (!ret) {
+		if ((dram_addr >= hole_base) &&
+		    (dram_addr < (hole_base + hole_size))) {
+			sys_addr = dram_addr + hole_offset;
+
+			debugf1("using DHAR to translate DramAddr 0x%lx to "
+				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
+				(unsigned long)sys_addr);
+
+			return sys_addr;
+		}
+	}
+
+	amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
+	sys_addr = dram_addr + base;
+
+	/*
+	 * The sys_addr we have computed up to this point is a 40-bit value
+	 * because the k8 deals with 40-bit values.  However, the value we are
+	 * supposed to return is a full 64-bit physical address.  The AMD
+	 * x86-64 architecture specifies that the most significant implemented
+	 * address bit through bit 63 of a physical address must be either all
+	 * 0s or all 1s.  Therefore we sign-extend the 40-bit sys_addr to a
+	 * 64-bit value below.  See section 3.4.2 of AMD publication 24592:
+	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
+	 * Programming.
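+	 * For instance, a 40-bit sys_addr of 0x8000000000 (bit 39 set)
+	 * becomes 0xffffff8000000000, while values with bit 39 clear are
+	 * returned unchanged.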
+	 */
+	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
+
+	debugf1("    Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
+		pvt->mc_node_id, (unsigned long)dram_addr,
+		(unsigned long)sys_addr);
+
+	return sys_addr;
+}
+
+/*
+ * @input_addr is an InputAddr associated with the node given by mci. Translate
+ * @input_addr to a SysAddr.
+ */
+static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
+					 u64 input_addr)
+{
+	return dram_addr_to_sys_addr(mci,
+				     input_addr_to_dram_addr(mci, input_addr));
+}
+
+/*
+ * Find the minimum and maximum InputAddr values that map to the given @csrow.
+ * Pass back these values in *input_addr_min and *input_addr_max.
+ */
+static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
+			      u64 *input_addr_min, u64 *input_addr_max)
+{
+	struct amd64_pvt *pvt;
+	u64 base, mask;
+
+	pvt = mci->pvt_info;
+	BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
+
+	base = base_from_dct_base(pvt, csrow);
+	mask = mask_from_dct_mask(pvt, csrow);
+
+	*input_addr_min = base & ~mask;
+	*input_addr_max = base | mask | pvt->dcs_mask_notused;
+}
+
+/*
+ * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
+ * Address High (section 3.6.4.6) register values and return the result. Address
+ * is located in the info structure (nbeah and nbeal); the encoding is
+ * device-specific.
+ */
+static u64 extract_error_address(struct mem_ctl_info *mci,
+				 struct err_regs *info)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+
+	return pvt->ops->get_error_address(mci, info);
+}
+
+
+/* Map the Error address to a PAGE and PAGE OFFSET. */
+static inline void error_address_to_page_and_offset(u64 error_address,
+						    u32 *page, u32 *offset)
+{
+	*page = (u32) (error_address >> PAGE_SHIFT);
+	*offset = ((u32) error_address) & ~PAGE_MASK;
+}
+
+/*
+ * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
+ * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
+ * of a node that detected an ECC memory error.  mci represents the node that
+ * the error address maps to (possibly different from the node that detected
+ * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
+ * error.
+ */
+static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
+{
+	int csrow;
+
+	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
+
+	if (csrow == -1)
+		amd64_mc_printk(mci, KERN_ERR,
+			     "Failed to translate InputAddr to csrow for "
+			     "address 0x%lx\n", (unsigned long)sys_addr);
+	return csrow;
+}
+
+static int get_channel_from_ecc_syndrome(unsigned short syndrome);
+
+static void amd64_cpu_display_info(struct amd64_pvt *pvt)
+{
+	if (boot_cpu_data.x86 == 0x11)
+		edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
+	else if (boot_cpu_data.x86 == 0x10)
+		edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
+	else if (boot_cpu_data.x86 == 0xf)
+		edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
+			(pvt->ext_model >= OPTERON_CPU_REV_F) ?
+			"Rev F or later" : "Rev E or earlier");
+	else
+		/* we'll hardly ever get here */
+		edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
+}
+
+/*
+ * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
+ * are ECC capable.
+ */
+static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
+{
+	int bit;
+	enum dev_type edac_cap = EDAC_FLAG_NONE;
+
+	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
+		? 19
+		: 17;
+
+	if (pvt->dclr0 & BIT(bit))
+		edac_cap = EDAC_FLAG_SECDED;
+
+	return edac_cap;
+}
+
+
+static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
+					 int ganged);
+
+/* Display and decode various NB registers for debug purposes. */
+static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
+{
+	int ganged;
+
+	debugf1("  nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
+		pvt->nbcap,
+		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "True" : "False",
+		(pvt->nbcap & K8_NBCAP_DUAL_NODE) ? "True" : "False",
+		(pvt->nbcap & K8_NBCAP_8_NODE) ? "True" : "False");
+	debugf1("    ECC Capable=%s   ChipKill Capable=%s\n",
+		(pvt->nbcap & K8_NBCAP_SECDED) ? "True" : "False",
+		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "True" : "False");
+	debugf1("  DramCfg0-low=0x%08x DIMM-ECC=%s Parity=%s Width=%s\n",
+		pvt->dclr0,
+		(pvt->dclr0 & BIT(19)) ?  "Enabled" : "Disabled",
+		(pvt->dclr0 & BIT(8)) ?  "Enabled" : "Disabled",
+		(pvt->dclr0 & BIT(11)) ?  "128b" : "64b");
+	debugf1("    DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s  DIMM Type=%s\n",
+		(pvt->dclr0 & BIT(12)) ?  "Y" : "N",
+		(pvt->dclr0 & BIT(13)) ?  "Y" : "N",
+		(pvt->dclr0 & BIT(14)) ?  "Y" : "N",
+		(pvt->dclr0 & BIT(15)) ?  "Y" : "N",
+		(pvt->dclr0 & BIT(16)) ?  "UN-Buffered" : "Buffered");
+
+
+	debugf1("  online-spare: 0x%8.08x\n", pvt->online_spare);
+
+	if (boot_cpu_data.x86 == 0xf) {
+		debugf1("  dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
+			pvt->dhar, dhar_base(pvt->dhar),
+			k8_dhar_offset(pvt->dhar));
+		debugf1("      DramHoleValid=%s\n",
+			(pvt->dhar & DHAR_VALID) ?  "True" : "False");
+
+		debugf1("  dbam-dkt: 0x%8.08x\n", pvt->dbam0);
+
+		/* everything below this point is Fam10h and above */
+		return;
+
+	} else {
+		debugf1("  dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
+			pvt->dhar, dhar_base(pvt->dhar),
+			f10_dhar_offset(pvt->dhar));
+		debugf1("    DramMemHoistValid=%s DramHoleValid=%s\n",
+			(pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
+			"True" : "False",
+			(pvt->dhar & DHAR_VALID) ?
+			"True" : "False");
+	}
+
+	/* Only if NOT ganged does dcl1 have valid info */
+	if (!dct_ganging_enabled(pvt)) {
+		debugf1("  DramCfg1-low=0x%08x DIMM-ECC=%s Parity=%s "
+			"Width=%s\n", pvt->dclr1,
+			(pvt->dclr1 & BIT(19)) ?  "Enabled" : "Disabled",
+			(pvt->dclr1 & BIT(8)) ?  "Enabled" : "Disabled",
+			(pvt->dclr1 & BIT(11)) ?  "128b" : "64b");
+		debugf1("    DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s  "
+			"DIMM Type=%s\n",
+			(pvt->dclr1 & BIT(12)) ?  "Y" : "N",
+			(pvt->dclr1 & BIT(13)) ?  "Y" : "N",
+			(pvt->dclr1 & BIT(14)) ?  "Y" : "N",
+			(pvt->dclr1 & BIT(15)) ?  "Y" : "N",
+			(pvt->dclr1 & BIT(16)) ?  "UN-Buffered" : "Buffered");
+	}
+
+	/*
+	 * Determine if ganged and then dump memory sizes for first controller,
+	 * and if NOT ganged dump info for 2nd controller.
+	 */
+	ganged = dct_ganging_enabled(pvt);
+
+	f10_debug_display_dimm_sizes(0, pvt, ganged);
+
+	if (!ganged)
+		f10_debug_display_dimm_sizes(1, pvt, ganged);
+}
+
+/* Read in both of DBAM registers */
+static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
+{
+	int err = 0;
+	unsigned int reg;
+
+	reg = DBAM0;
+	err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
+	if (err)
+		goto err_reg;
+
+	if (boot_cpu_data.x86 >= 0x10) {
+		reg = DBAM1;
+		err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);
+
+		if (err)
+			goto err_reg;
+	}
+
+	return;
+
+err_reg:
+	debugf0("Error reading F2x%03x.\n", reg);
+}
+
+/*
+ * NOTE: CPU Revision Dependent code: Rev E and Rev F
+ *
+ * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
+ * set the shift factor for the DCSB and DCSM values.
+ *
+ * ->dcs_mask_notused, RevE:
+ *
+ * To find the max InputAddr for the csrow, start with the base address and set
+ * all bits that are "don't care" bits in the test at the start of section
+ * 3.5.4 (p. 84).
+ *
+ * The "don't care" bits are all set bits in the mask and all bits in the gaps
+ * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
+ * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
+ * gaps.
+ *
+ * ->dcs_mask_notused, RevF and later:
+ *
+ * To find the max InputAddr for the csrow, start with the base address and set
+ * all bits that are "don't care" bits in the test at the start of NPT section
+ * 4.5.4 (p. 87).
+ *
+ * The "don't care" bits are all set bits in the mask and all bits in the gaps
+ * between bit ranges [36:27] and [21:13].
+ *
+ * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
+ * which are all bits in the above-mentioned gaps.
+ */
+static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
+{
+
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
+		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
+		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
+		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
+		pvt->dcs_shift		= REV_E_DCS_SHIFT;
+		pvt->cs_count		= 8;
+		pvt->num_dcsm		= 8;
+	} else {
+		pvt->dcsb_base		= REV_F_F1Xh_DCSB_BASE_BITS;
+		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
+		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
+		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;
+
+		if (boot_cpu_data.x86 == 0x11) {
+			pvt->cs_count = 4;
+			pvt->num_dcsm = 2;
+		} else {
+			pvt->cs_count = 8;
+			pvt->num_dcsm = 4;
+		}
+	}
+}
+
+/*
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
+ */
+static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
+{
+	int cs, reg, err = 0;
+
+	amd64_set_dct_base_and_mask(pvt);
+
+	for (cs = 0; cs < pvt->cs_count; cs++) {
+		reg = K8_DCSB0 + (cs * 4);
+		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+						&pvt->dcsb0[cs]);
+		if (unlikely(err))
+			debugf0("Reading K8_DCSB0[%d] failed\n", cs);
+		else
+			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
+				cs, pvt->dcsb0[cs], reg);
+
+		/* If DCTs are NOT ganged, then read in DCT1's base */
+		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
+			reg = F10_DCSB1 + (cs * 4);
+			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+							&pvt->dcsb1[cs]);
+			if (unlikely(err))
+				debugf0("Reading F10_DCSB1[%d] failed\n", cs);
+			else
+				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
+					cs, pvt->dcsb1[cs], reg);
+		} else {
+			pvt->dcsb1[cs] = 0;
+		}
+	}
+
+	for (cs = 0; cs < pvt->num_dcsm; cs++) {
+		reg = K8_DCSM0 + (cs * 4);
+		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+					&pvt->dcsm0[cs]);
+		if (unlikely(err))
+			debugf0("Reading K8_DCSM0 failed\n");
+		else
+			debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
+				cs, pvt->dcsm0[cs], reg);
+
+		/* If DCTs are NOT ganged, then read in DCT1's mask */
+		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
+			reg = F10_DCSM1 + (cs * 4);
+			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+					&pvt->dcsm1[cs]);
+			if (unlikely(err))
+				debugf0("Reading F10_DCSM1[%d] failed\n", cs);
+			else
+				debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
+					cs, pvt->dcsm1[cs], reg);
+		} else
+			pvt->dcsm1[cs] = 0;
+	}
+}
+
+static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
+{
+	enum mem_type type;
+
+	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
+		/* Rev F and later */
+		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
+	} else {
+		/* Rev E and earlier */
+		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
+	}
+
+	debugf1("  Memory type is: %s\n",
+		(type == MEM_DDR2) ? "MEM_DDR2" :
+		(type == MEM_RDDR2) ? "MEM_RDDR2" :
+		(type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
+
+	return type;
+}
+
+/*
+ * Read the DRAM Configuration Low register. It differs between CG, D & E revs
+ * and the later RevF memory controllers (DDR vs DDR2)
+ *
+ * Return:
+ *      number of memory channels in operation
+ * Pass back:
+ *      contents of the DCL0_LOW register
+ */
+static int k8_early_channel_count(struct amd64_pvt *pvt)
+{
+	int flag, err = 0;
+
+	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+	if (err)
+		return err;
+
+	if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
+		/* RevF (NPT) and later */
+		flag = pvt->dclr0 & F10_WIDTH_128;
+	} else {
+		/* RevE and earlier */
+		flag = pvt->dclr0 & REVE_WIDTH_128;
+	}
+
+	/* not used */
+	pvt->dclr1 = 0;
+
+	return (flag) ? 2 : 1;
+}
+
+/* extract the ERROR ADDRESS for the K8 CPUs */
+static u64 k8_get_error_address(struct mem_ctl_info *mci,
+				struct err_regs *info)
+{
+	return (((u64) (info->nbeah & 0xff)) << 32) +
+			(info->nbeal & ~0x03);
+}
+
+/*
+ * Read the Base and Limit registers for K8 based Memory controllers; extract
+ * fields from the 'raw' reg into separate data fields
+ *
+ * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
+ */
+static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+{
+	u32 low;
+	u32 off = dram << 3;	/* 8 bytes between DRAM entries */
+	int err;
+
+	err = pci_read_config_dword(pvt->addr_f1_ctl,
+				    K8_DRAM_BASE_LOW + off, &low);
+	if (err)
+		debugf0("Reading K8_DRAM_BASE_LOW failed\n");
+
+	/* Extract parts into separate data entries */
+	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
+	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
+	pvt->dram_rw_en[dram] = (low & 0x3);
+
+	err = pci_read_config_dword(pvt->addr_f1_ctl,
+				    K8_DRAM_LIMIT_LOW + off, &low);
+	if (err)
+		debugf0("Reading K8_DRAM_LIMIT_LOW failed\n");
+
+	/*
+	 * Extract parts into separate data entries. Limit is the HIGHEST memory
+	 * location of the region, so lower 24 bits need to be all ones
+	 */
+	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
+	pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
+	pvt->dram_DstNode[dram] = (low & 0x7);
+}
+
+static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
+					struct err_regs *info,
+					u64 SystemAddress)
+{
+	struct mem_ctl_info *src_mci;
+	unsigned short syndrome;
+	int channel, csrow;
+	u32 page, offset;
+
+	/* Extract the syndrome parts and form a 16-bit syndrome */
+	syndrome  = HIGH_SYNDROME(info->nbsl) << 8;
+	syndrome |= LOW_SYNDROME(info->nbsh);
+
+	/* CHIPKILL enabled */
+	if (info->nbcfg & K8_NBCFG_CHIPKILL) {
+		channel = get_channel_from_ecc_syndrome(syndrome);
+		if (channel < 0) {
+			/*
+			 * Syndrome didn't map, so we don't know which of the
+			 * 2 DIMMs is in error. So we need to ID 'both' of them
+			 * as suspect.
+			 */
+			amd64_mc_printk(mci, KERN_WARNING,
+				       "unknown syndrome 0x%x - possible error "
+				       "reporting race\n", syndrome);
+			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+			return;
+		}
+	} else {
+		/*
+		 * non-chipkill ecc mode
+		 *
+		 * The k8 documentation is unclear about how to determine the
+		 * channel number when using non-chipkill memory.  This method
+		 * was obtained from email communication with someone at AMD.
+		 * (Wish the email was placed in this comment - norsk)
+		 */
+		channel = ((SystemAddress & BIT(3)) != 0);
+	}
+
+	/*
+	 * Find out which node the error address belongs to. This may be
+	 * different from the node that detected the error.
+	 */
+	src_mci = find_mc_by_sys_addr(mci, SystemAddress);
+	if (!src_mci) {
+		amd64_mc_printk(mci, KERN_ERR,
+			     "failed to map error address 0x%lx to a node\n",
+			     (unsigned long)SystemAddress);
+		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+		return;
+	}
+
+	/* Now map the SystemAddress to a CSROW */
+	csrow = sys_addr_to_csrow(src_mci, SystemAddress);
+	if (csrow < 0) {
+		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
+	} else {
+		error_address_to_page_and_offset(SystemAddress, &page, &offset);
+
+		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
+				  channel, EDAC_MOD_STR);
+	}
+}
+
+/*
+ * determine the number of PAGES for this DIMM's size based on its DRAM
+ * Address Mapping.
+ *
+ * First step is to calculate the number of bits to shift a value of 1 left to
+ * indicate how many pages. Start with the DBAM value as the starting bits,
+ * then proceed to adjust those shift bits, based on CPU rev and the table.
+ * See BKDG on the DBAM
+ */
+static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+{
+	int nr_pages;
+
+	if (pvt->ext_model >= OPTERON_CPU_REV_F) {
+		nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
+	} else {
+		/*
+		 * RevE and earlier section; this line is tricky. It collapses the
+		 * table used by RevD and later to one that matches revisions CG
+		 * and earlier.
+		 */
+		dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
+				(dram_map > 8 ? 4 : (dram_map > 5 ?
+				3 : (dram_map > 2 ? 1 : 0))) : 0;
+
+		/* 25 shift is 32MiB minimum DIMM size in RevE and prior */
+		nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
+	}
+
+	return nr_pages;
+}
+
+/*
+ * Get the number of DCT channels in use.
+ *
+ * Return:
+ *	number of Memory Channels in operation
+ * Pass back:
+ *	contents of the DCL0_LOW register
+ */
+static int f10_early_channel_count(struct amd64_pvt *pvt)
+{
+	int dbams[] = { DBAM0, DBAM1 };
+	int err = 0, channels = 0;
+	int i, j;
+	u32 dbam;
+
+	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+	if (err)
+		goto err_reg;
+
+	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
+	if (err)
+		goto err_reg;
+
+	/* If we are in 128 bit mode, then we are using 2 channels */
+	if (pvt->dclr0 & F10_WIDTH_128) {
+		debugf0("Data WIDTH is 128 bits - 2 channels\n");
+		channels = 2;
+		return channels;
+	}
+
+	/*
+	 * Need to check if in UN-ganged mode: in that case, there are 2 channels,
+	 * but they are NOT in 128 bit mode and thus the above 'dcl0' status bit
+	 * will be OFF.
+	 *
+	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
+	 * their CSEnable bit on. If so, then SINGLE DIMM case.
+	 */
+	debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
+
+	/*
+	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
+	 * is more than just one DIMM present in unganged mode. Need to check
+	 * both controllers since DIMMs can be placed in either one.
+	 */
+	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
+		err = pci_read_config_dword(pvt->dram_f2_ctl, dbams[i], &dbam);
+		if (err)
+			goto err_reg;
+
+		for (j = 0; j < 4; j++) {
+			if (DBAM_DIMM(j, dbam) > 0) {
+				channels++;
+				break;
+			}
+		}
+	}
+
+	debugf0("MCT channel count: %d\n", channels);
+
+	return channels;
+
+err_reg:
+	return -1;
+
+}
+
+static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+{
+	return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
+}
+
+/* Enable extended configuration access via 0xCF8 feature */
+static void amd64_setup(struct amd64_pvt *pvt)
+{
+	u32 reg;
+
+	pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+
+	pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
+	reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
+}
+
+/* Restore the extended configuration access via 0xCF8 feature */
+static void amd64_teardown(struct amd64_pvt *pvt)
+{
+	u32 reg;
+
+	pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+
+	reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+	if (pvt->flags.cf8_extcfg)
+		reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
+}
+
+static u64 f10_get_error_address(struct mem_ctl_info *mci,
+			struct err_regs *info)
+{
+	return (((u64) (info->nbeah & 0xffff)) << 32) +
+			(info->nbeal & ~0x01);
+}
+
+/*
+ * Read the Base and Limit registers for F10 based Memory controllers. Extract
+ * fields from the 'raw' reg into separate data fields.
+ *
+ * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
+ */
+static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+{
+	u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
+
+	low_offset = K8_DRAM_BASE_LOW + (dram << 3);
+	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
+
+	/* read the 'raw' DRAM BASE Address register */
+	pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base);
+
+	/* Read from the ECS data register */
+	pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base);
+
+	/* Extract parts into separate data entries */
+	pvt->dram_rw_en[dram] = (low_base & 0x3);
+
+	if (pvt->dram_rw_en[dram] == 0)
+		return;
+
+	pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
+
+	pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
+			       (((u64)low_base  & 0xFFFF0000) << 8);
+
+	low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
+	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
+
+	/* read the 'raw' LIMIT registers */
+	pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit);
+
+	/* Read from the ECS data register for the HIGH portion */
+	pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit);
+
+	debugf0("  HW Regs: BASE=0x%08x-%08x      LIMIT=  0x%08x-%08x\n",
+		high_base, low_base, high_limit, low_limit);
+
+	pvt->dram_DstNode[dram] = (low_limit & 0x7);
+	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
+
+	/*
+	 * Extract address values and form a LIMIT address. Limit is the HIGHEST
+	 * memory location of the region, so low 24 bits need to be all ones.
+	 */
+	pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
+				(((u64) low_limit & 0xFFFF0000) << 8) |
+				0x00FFFFFF;
+}
+
+static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
+{
+	int err = 0;
+
+	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
+				    &pvt->dram_ctl_select_low);
+	if (err) {
+		debugf0("Reading F10_DCTL_SEL_LOW failed\n");
+	} else {
+		debugf0("DRAM_DCTL_SEL_LOW=0x%x  DctSelBaseAddr=0x%x\n",
+			pvt->dram_ctl_select_low, dct_sel_baseaddr(pvt));
+
+		debugf0("  DRAM DCTs are=%s DRAM Is=%s DRAM-Ctl-"
+				"sel-hi-range=%s\n",
+			(dct_ganging_enabled(pvt) ? "GANGED" : "NOT GANGED"),
+			(dct_dram_enabled(pvt) ? "Enabled"   : "Disabled"),
+			(dct_high_range_enabled(pvt) ? "Enabled" : "Disabled"));
+
+		debugf0("  DctDatIntLv=%s MemCleared=%s DctSelIntLvAddr=0x%x\n",
+			(dct_data_intlv_enabled(pvt) ? "Enabled" : "Disabled"),
+			(dct_memory_cleared(pvt) ? "True " : "False "),
+			dct_sel_interleave_addr(pvt));
+	}
+
+	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
+				    &pvt->dram_ctl_select_high);
+	if (err)
+		debugf0("Reading F10_DCTL_SEL_HIGH failed\n");
+}
+
+/*
+ * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
+ * Interleaving Modes.
+ */
+static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+				int hi_range_sel, u32 intlv_en)
+{
+	u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
+
+	if (dct_ganging_enabled(pvt))
+		cs = 0;
+	else if (hi_range_sel)
+		cs = dct_sel_high;
+	else if (dct_interleave_enabled(pvt)) {
+		/*
+		 * see F2x110[DctSelIntLvAddr] - channel interleave mode
+		 */
+		if (dct_sel_interleave_addr(pvt) == 0)
+			cs = sys_addr >> 6 & 1;
+		else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
+			temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+
+			if (dct_sel_interleave_addr(pvt) & 1)
+				cs = (sys_addr >> 9 & 1) ^ temp;
+			else
+				cs = (sys_addr >> 6 & 1) ^ temp;
+		} else if (intlv_en & 4)
+			cs = sys_addr >> 15 & 1;
+		else if (intlv_en & 2)
+			cs = sys_addr >> 14 & 1;
+		else if (intlv_en & 1)
+			cs = sys_addr >> 13 & 1;
+		else
+			cs = sys_addr >> 12 & 1;
+	} else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
+		cs = ~dct_sel_high & 1;
+	else
+		cs = 0;
+
+	return cs;
+}
+
+static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
+{
+	if (intlv_en == 1)
+		return 1;
+	else if (intlv_en == 3)
+		return 2;
+	else if (intlv_en == 7)
+		return 3;
+
+	return 0;
+}
+
+/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
+static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
+						 u32 dct_sel_base_addr,
+						 u64 dct_sel_base_off,
+						 u32 hole_valid, u32 hole_off,
+						 u64 dram_base)
+{
+	u64 chan_off;
+
+	if (hi_range_sel) {
+		if (!(dct_sel_base_addr & 0xFFFFF800) &&
+		   hole_valid && (sys_addr >= 0x100000000ULL))
+			chan_off = hole_off << 16;
+		else
+			chan_off = dct_sel_base_off;
+	} else {
+		if (hole_valid && (sys_addr >= 0x100000000ULL))
+			chan_off = hole_off << 16;
+		else
+			chan_off = dram_base & 0xFFFFF8000000ULL;
+	}
+
+	return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
+			(chan_off & 0x0000FFFFFF800000ULL);
+}
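+
+/*
+ * For instance (illustrative): in the common low-range case with no DRAM
+ * hole in effect, the above reduces to
+ *
+ *	chan_off  = dram_base & 0xFFFFF8000000ULL;
+ *	chan_addr = (sys_addr & 0x0000FFFFFFFFFFC0ULL) - chan_off;
+ *
+ * i.e. the 64-byte-aligned system address minus the base of its DRAM range.
+ */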
+
+/* Hack for the time being - Can we get this from BIOS?? */
+#define	CH0SPARE_RANK	0
+#define	CH1SPARE_RANK	1
+
+/*
+ * Check if the csrow passed in is marked as SPARED; if so, return the new
+ * spare row.
+ */
+static inline int f10_process_possible_spare(int csrow,
+				u32 cs, struct amd64_pvt *pvt)
+{
+	u32 swap_done;
+	u32 bad_dram_cs;
+
+	/* Depending on channel, isolate respective SPARING info */
+	if (cs) {
+		swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
+		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
+		if (swap_done && (csrow == bad_dram_cs))
+			csrow = CH1SPARE_RANK;
+	} else {
+		swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
+		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
+		if (swap_done && (csrow == bad_dram_cs))
+			csrow = CH0SPARE_RANK;
+	}
+	return csrow;
+}
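+
+/*
+ * Example (illustrative): if SwapDone is set for channel 1 and the failing
+ * csrow equals BadDramCs for that channel, accesses have been redirected to
+ * the spare rank, so CH1SPARE_RANK is reported instead of the original csrow.
+ */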
+
+/*
+ * Iterate over the DRAM DCT "base" and "mask" registers looking for a
+ * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
+ *
+ * Return:
+ *	-EINVAL:  NOT FOUND
+ *	0..csrow = Chip-Select Row
+ */
+static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
+{
+	struct mem_ctl_info *mci;
+	struct amd64_pvt *pvt;
+	u32 cs_base, cs_mask;
+	int cs_found = -EINVAL;
+	int csrow;
+
+	mci = mci_lookup[nid];
+	if (!mci)
+		return cs_found;
+
+	pvt = mci->pvt_info;
+
+	debugf1("InputAddr=0x%x  channelselect=%d\n", in_addr, cs);
+
+	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
+
+		cs_base = amd64_get_dct_base(pvt, cs, csrow);
+		if (!(cs_base & K8_DCSB_CS_ENABLE))
+			continue;
+
+		/*
+		 * We have an ENABLED CSROW, Isolate just the MASK bits of the
+		 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
+		 * of the actual address.
+		 */
+		cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
+
+		/*
+		 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
+		 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
+		 */
+		cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
+
+		debugf1("    CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
+				csrow, cs_base, cs_mask);
+
+		cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
+
+		debugf1("              Final CSMask=0x%x\n", cs_mask);
+		debugf1("    (InputAddr & ~CSMask)=0x%x "
+				"(CSBase & ~CSMask)=0x%x\n",
+				(in_addr & ~cs_mask), (cs_base & ~cs_mask));
+
+		if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
+			cs_found = f10_process_possible_spare(csrow, cs, pvt);
+
+			debugf1(" MATCH csrow=%d\n", cs_found);
+			break;
+		}
+	}
+	return cs_found;
+}
+
+/*
+ * For a given @dram_range (whose base/limit the caller has already matched
+ * against @sys_addr), map @sys_addr to its node, channel and chip select.
+ */
+static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
+				  u64 sys_addr, int *nid, int *chan_sel)
+{
+	int node_id, cs_found = -EINVAL, high_range = 0;
+	u32 intlv_en, intlv_sel, intlv_shift, hole_off;
+	u32 hole_valid, tmp, dct_sel_base, channel;
+	u64 dram_base, chan_addr, dct_sel_base_off;
+
+	dram_base = pvt->dram_base[dram_range];
+	intlv_en = pvt->dram_IntlvEn[dram_range];
+
+	node_id = pvt->dram_DstNode[dram_range];
+	intlv_sel = pvt->dram_IntlvSel[dram_range];
+
+	debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
+		dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
+
+	/*
+	 * This assumes that one node's DHAR is the same as all the other
+	 * nodes' DHAR.
+	 */
+	hole_off = (pvt->dhar & 0x0000FF80);
+	hole_valid = (pvt->dhar & 0x1);
+	dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
+
+	debugf1("   HoleOffset=0x%x  HoleValid=0x%x IntlvSel=0x%x\n",
+			hole_off, hole_valid, intlv_sel);
+
+	if (intlv_en &&
+	    (intlv_sel != ((sys_addr >> 12) & intlv_en)))
+		return -EINVAL;
+
+	dct_sel_base = dct_sel_baseaddr(pvt);
+
+	/*
+	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
+	 * select between DCT0 and DCT1.
+	 */
+	if (dct_high_range_enabled(pvt) &&
+	   !dct_ganging_enabled(pvt) &&
+	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
+		high_range = 1;
+
+	channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
+
+	chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
+					     dct_sel_base_off, hole_valid,
+					     hole_off, dram_base);
+
+	intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
+
+	/* remove Node ID (in case of memory interleaving) */
+	tmp = chan_addr & 0xFC0;
+
+	chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
+
+	/* remove channel interleave and hash */
+	if (dct_interleave_enabled(pvt) &&
+	   !dct_high_range_enabled(pvt) &&
+	   !dct_ganging_enabled(pvt)) {
+		if (dct_sel_interleave_addr(pvt) != 1)
+			chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
+		else {
+			tmp = chan_addr & 0xFC0;
+			chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
+					| tmp;
+		}
+	}
+
+	debugf1("   (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
+		chan_addr, (u32)(chan_addr >> 8));
+
+	cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
+
+	if (cs_found >= 0) {
+		*nid = node_id;
+		*chan_sel = channel;
+	}
+	return cs_found;
+}
+
+static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
+				       int *node, int *chan_sel)
+{
+	int dram_range, cs_found = -EINVAL;
+	u64 dram_base, dram_limit;
+
+	for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
+
+		if (!pvt->dram_rw_en[dram_range])
+			continue;
+
+		dram_base = pvt->dram_base[dram_range];
+		dram_limit = pvt->dram_limit[dram_range];
+
+		if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
+
+			cs_found = f10_match_to_this_node(pvt, dram_range,
+							  sys_addr, node,
+							  chan_sel);
+			if (cs_found >= 0)
+				break;
+		}
+	}
+	return cs_found;
+}
+
+/*
+ * This is the F10h reference code from AMD to map a @sys_addr to NodeID,
+ * CSROW, Channel.
+ *
+ * The @sys_addr is usually an error address received from the hardware.
+ */
+static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
+				     struct err_regs *info,
+				     u64 sys_addr)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u32 page, offset;
+	unsigned short syndrome;
+	int nid, csrow, chan = 0;
+
+	csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
+
+	if (csrow >= 0) {
+		error_address_to_page_and_offset(sys_addr, &page, &offset);
+
+		syndrome  = HIGH_SYNDROME(info->nbsl) << 8;
+		syndrome |= LOW_SYNDROME(info->nbsh);
+
+		/*
+		 * Is CHIPKILL on? If so, then we can attempt to use the
+		 * syndrome to isolate which channel the error was on.
+		 */
+		if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
+			chan = get_channel_from_ecc_syndrome(syndrome);
+
+		if (chan >= 0) {
+			edac_mc_handle_ce(mci, page, offset, syndrome,
+					csrow, chan, EDAC_MOD_STR);
+		} else {
+			/*
+			 * Channel unknown, report all channels on this
+			 * CSROW as failed.
+			 */
+			for (chan = 0; chan < mci->csrows[csrow].nr_channels;
+								chan++) {
+					edac_mc_handle_ce(mci, page, offset,
+							syndrome,
+							csrow, chan,
+							EDAC_MOD_STR);
+			}
+		}
+
+	} else {
+		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+	}
+}
+
+/*
+ * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
+ * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
+ * indicates an empty DIMM slot, as reported by Hardware on empty slots.
+ *
+ * Normalize to 128MB by subtracting the 27-bit (128MB) shift.
+ */
+static int map_dbam_to_csrow_size(int index)
+{
+	int mega_bytes = 0;
+
+	if (index > 0 && index <= DBAM_MAX_VALUE)
+		mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
+
+	return mega_bytes;
+}
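+
+/*
+ * Worked example (the actual shift values live in revf_quad_ddr2_shift[]):
+ * a table entry of 27 yields 128 << 0 = 128MB, an entry of 28 yields
+ * 128 << 1 = 256MB, and so on in powers of two.
+ */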
+
+/*
+ * debug routine to display the memory sizes of a DIMM (ganged or not) and its
+ * CSROWs as well
+ */
+static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
+					 int ganged)
+{
+	int dimm, size0, size1;
+	u32 dbam;
+	u32 *dcsb;
+
+	debugf1("  dbam%d: 0x%8.08x  CSROW is %s\n", ctrl,
+			ctrl ? pvt->dbam1 : pvt->dbam0,
+			ganged ? "GANGED - dbam1 not used" : "NON-GANGED");
+
+	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
+	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
+
+	/* Dump memory sizes for DIMM and its CSROWs */
+	for (dimm = 0; dimm < 4; dimm++) {
+
+		size0 = 0;
+		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
+			size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+
+		size1 = 0;
+		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
+			size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+
+		debugf1("     CTRL-%d DIMM-%d=%5dMB   CSROW-%d=%5dMB "
+				"CSROW-%d=%5dMB\n",
+				ctrl,
+				dimm,
+				size0 + size1,
+				dimm * 2,
+				size0,
+				dimm * 2 + 1,
+				size1);
+	}
+}
+
+/*
+ * Very early hardware probe on pci_probe thread to determine if this module
+ * supports the hardware.
+ *
+ * Return:
+ *      0 for OK
+ *      1 for error
+ */
+static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
+{
+	int ret = 0;
+
+	/*
+	 * If we are on a DDR3 machine, we don't yet know whether
+	 * we support it properly
+	 */
+	if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
+	    (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
+
+		amd64_printk(KERN_WARNING,
+			"%s() This machine is running with DDR3 memory. "
+			"This is not currently supported. "
+			"DCHR0=0x%x DCHR1=0x%x\n",
+			__func__, pvt->dchr0, pvt->dchr1);
+
+		amd64_printk(KERN_WARNING,
+			"   Contact '%s' module MAINTAINER to help add"
+			" support.\n",
+			EDAC_MOD_STR);
+
+		ret = 1;
+
+	}
+	return ret;
+}
+
+/*
+ * There are currently 3 types of MC devices for AMD Athlon/Opterons
+ * (as per PCI DEVICE_IDs):
+ *
+ * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI
+ * DEVICE ID, even though there are differences between the revisions
+ * (CG, D, E, F).
+ *
+ * Family F10h and F11h.
+ *
+ */
+static struct amd64_family_type amd64_family_types[] = {
+	[K8_CPUS] = {
+		.ctl_name = "RevF",
+		.addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
+		.misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
+		.ops = {
+			.early_channel_count = k8_early_channel_count,
+			.get_error_address = k8_get_error_address,
+			.read_dram_base_limit = k8_read_dram_base_limit,
+			.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
+			.dbam_map_to_pages = k8_dbam_map_to_pages,
+		}
+	},
+	[F10_CPUS] = {
+		.ctl_name = "Family 10h",
+		.addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
+		.misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
+		.ops = {
+			.probe_valid_hardware = f10_probe_valid_hardware,
+			.early_channel_count = f10_early_channel_count,
+			.get_error_address = f10_get_error_address,
+			.read_dram_base_limit = f10_read_dram_base_limit,
+			.read_dram_ctl_register = f10_read_dram_ctl_register,
+			.map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+			.dbam_map_to_pages = f10_dbam_map_to_pages,
+		}
+	},
+	[F11_CPUS] = {
+		.ctl_name = "Family 11h",
+		.addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
+		.misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
+		.ops = {
+			.probe_valid_hardware = f10_probe_valid_hardware,
+			.early_channel_count = f10_early_channel_count,
+			.get_error_address = f10_get_error_address,
+			.read_dram_base_limit = f10_read_dram_base_limit,
+			.read_dram_ctl_register = f10_read_dram_ctl_register,
+			.map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+			.dbam_map_to_pages = f10_dbam_map_to_pages,
+		}
+	},
+};
+
+static struct pci_dev *pci_get_related_function(unsigned int vendor,
+						unsigned int device,
+						struct pci_dev *related)
+{
+	struct pci_dev *dev = NULL;
+
+	dev = pci_get_device(vendor, device, dev);
+	while (dev) {
+		if ((dev->bus->number == related->bus->number) &&
+		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
+			break;
+		dev = pci_get_device(vendor, device, dev);
+	}
+
+	return dev;
+}
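+
+/*
+ * Usage note: the northbridge exposes one PCI device per function at the
+ * same bus/slot, so given the function-2 DRAM controller this helper is used
+ * below to reserve the matching function-1 (address map) and function-3
+ * (misc) devices.
+ */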
+
+/*
+ * syndrome mapping table for ECC ChipKill devices
+ *
+ * The comment in each row is the token (nibble) number that is in error.
+ * The least significant nibble of the syndrome is the mask for the bits
+ * that are in error (need to be toggled) for the particular nibble.
+ *
+ * Each row contains 16 entries.
+ * The first entry (0th) is the channel number for that row of syndromes.
+ * The remaining 15 entries are the syndromes for the respective Error
+ * bit mask index.
+ *
+ * The 1st index entry corresponds to mask 0001b, indicating that the
+ * rightmost bit is the bit in error.
+ * The 2nd index entry corresponds to mask 0010b, indicating that the second
+ * bit is damaged.
+ * The 3rd index entry corresponds to mask 0011b, indicating that the
+ * rightmost 2 bits are damaged.
+ * And so on until index 15, mask 1111b, whose entry holds the syndrome
+ * indicating that all 4 bits are damaged.
+ *
+ * A search is performed on this table looking for a given syndrome.
+ *
+ * See the AMD documentation for ECC syndromes. This ECC table is valid
+ * across all the versions of the AMD64 processors.
+ *
+ * A fast lookup is to use the LAST four bits of the 16-bit syndrome as a
+ * COLUMN index, then search all ROWS of that column, looking for a match
+ * with the input syndrome. The ROW value will be the token number.
+ *
+ * The 0'th entry on that row can be returned as the CHANNEL (0 or 1) of this
+ * error.
+ */
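+/*
+ * Worked lookup (values from row 0 below): syndrome 0xe821 has low nibble
+ * 0x1, so column 1 is scanned; row 0 holds 0xe821 at index 1 and its 0th
+ * entry is 0, so the error is reported on channel 0.
+ */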
+#define NUMBER_ECC_ROWS  36
+static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = {
+	/* Channel 0 syndromes */
+	{/*0*/  0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57,
+	   0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df },
+	{/*1*/  0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7,
+	   0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f },
+	{/*2*/  0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+	   0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f },
+	{/*3*/  0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057,
+	   0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df },
+	{/*4*/  0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097,
+	   0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f },
+	{/*5*/  0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857,
+	   0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf },
+	{/*6*/  0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467,
+	   0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f },
+	{/*7*/  0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27,
+	   0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff },
+	{/*8*/  0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177,
+	   0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f },
+	{/*9*/  0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07,
+	   0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f },
+	{/*a*/  0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07,
+	   0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f },
+	{/*b*/  0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7,
+	   0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f },
+	{/*c*/  0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87,
+	   0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f },
+	{/*d*/  0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067,
+	   0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f },
+	{/*e*/  0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77,
+	   0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f },
+	{/*f*/  0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77,
+	   0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f },
+
+	/* Channel 1 syndromes */
+	{/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187,
+	   0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f },
+	{/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627,
+	   0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff },
+	{/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97,
+	   0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f },
+	{/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97,
+	   0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f },
+	{/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987,
+	   0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f },
+	{/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677,
+	   0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f },
+	{/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387,
+	   0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f },
+	{/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17,
+	   0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf },
+	{/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7,
+	   0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f },
+	{/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397,
+	   0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f },
+	{/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617,
+	   0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf },
+	{/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527,
+	   0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff },
+	{/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7,
+	   0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef },
+	{/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7,
+	   0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def },
+	{/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67,
+	   0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f },
+	{/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377,
+	   0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f },
+
+	/* ECC bits are also in the set of tokens and they too can go bad
+	 * first 2 cover channel 0, while the second 2 cover channel 1
+	 */
+	{/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807,
+	   0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f },
+	{/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07,
+	   0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f },
+	{/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97,
+	   0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f },
+	{/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757,
+	   0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df }
+};
+
+/*
+ * Given the syndrome argument, scan each of the channel tables for a syndrome
+ * match. Depending on which table it is found, return the channel number.
+ */
+static int get_channel_from_ecc_syndrome(unsigned short syndrome)
+{
+	int row;
+	int column;
+
+	/* Determine column to scan */
+	column = syndrome & 0xF;
+
+	/* Scan all rows, looking for syndrome, or end of table */
+	for (row = 0; row < NUMBER_ECC_ROWS; row++) {
+		if (ecc_chipkill_syndromes[row][column] == syndrome)
+			return ecc_chipkill_syndromes[row][0];
+	}
+
+	debugf0("syndrome(%x) not found\n", syndrome);
+	return -1;
+}
+
+/*
+ * Check for valid error in the NB Status High register. If so, proceed to read
+ * NB Status Low, NB Address Low and NB Address High registers and store the
+ * data in the error structure.
+ *
+ * Returns:
+ *	- 1: if hardware regs contains valid error info
+ *	- 0: if no valid error is indicated
+ */
+static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
+				     struct err_regs *regs)
+{
+	struct amd64_pvt *pvt;
+	struct pci_dev *misc_f3_ctl;
+	int err = 0;
+
+	pvt = mci->pvt_info;
+	misc_f3_ctl = pvt->misc_f3_ctl;
+
+	err = pci_read_config_dword(misc_f3_ctl, K8_NBSH, &regs->nbsh);
+	if (err)
+		goto err_reg;
+
+	if (!(regs->nbsh & K8_NBSH_VALID_BIT))
+		return 0;
+
+	/* valid error, read remaining error information registers */
+	err = pci_read_config_dword(misc_f3_ctl, K8_NBSL, &regs->nbsl);
+	if (err)
+		goto err_reg;
+
+	err = pci_read_config_dword(misc_f3_ctl, K8_NBEAL, &regs->nbeal);
+	if (err)
+		goto err_reg;
+
+	err = pci_read_config_dword(misc_f3_ctl, K8_NBEAH, &regs->nbeah);
+	if (err)
+		goto err_reg;
+
+	err = pci_read_config_dword(misc_f3_ctl, K8_NBCFG, &regs->nbcfg);
+	if (err)
+		goto err_reg;
+
+	return 1;
+
+err_reg:
+	debugf0("Reading error info register failed\n");
+	return 0;
+}
+
+/*
+ * This function is called to retrieve the error data from hardware and store it
+ * in the info structure.
+ *
+ * Returns:
+ *	- 1: if a valid error is found
+ *	- 0: if no error is found
+ */
+static int amd64_get_error_info(struct mem_ctl_info *mci,
+				struct err_regs *info)
+{
+	struct amd64_pvt *pvt;
+	struct err_regs regs;
+
+	pvt = mci->pvt_info;
+
+	if (!amd64_get_error_info_regs(mci, info))
+		return 0;
+
+	/*
+	 * Here's the problem with the K8's EDAC reporting: There are four
+	 * registers which report pieces of error information. They are shared
+	 * between CEs and UEs. Furthermore, contrary to what is stated in the
+	 * BKDG, the overflow bit is never used! Every error always updates the
+	 * reporting registers.
+	 *
+	 * Can you see the race condition? All four error reporting registers
+	 * must be read before a new error updates them! There is no way to read
+	 * all four registers atomically. The best that can be done is to detect
+	 * that a race has occurred and then report the error without any kind of
+	 * precision.
+	 *
+	 * What is still positive is that errors are still reported and thus
+	 * problems can still be detected - just not localized because the
+	 * syndrome and address are spread out across registers.
+	 *
+	 * Grrrrr!!!!!  Here's hoping that AMD fixes this in some future K8 rev.
+	 * UEs and CEs should have separate register sets with proper overflow
+	 * bits that are used! At the very least the problem can be fixed by
+	 * honoring the ErrValid bit in 'nbsh' and not updating registers - just
+	 * set the overflow bit - unless the current error is CE and the new
+	 * error is UE which would be the only situation for overwriting the
+	 * current values.
+	 */
+
+	regs = *info;
+
+	/* Use info from the second read - most current */
+	if (unlikely(!amd64_get_error_info_regs(mci, info)))
+		return 0;
+
+	/* clear the error bits in hardware */
+	pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT);
+
+	/* Check for the possible race condition */
+	if ((regs.nbsh != info->nbsh) ||
+	     (regs.nbsl != info->nbsl) ||
+	     (regs.nbeah != info->nbeah) ||
+	     (regs.nbeal != info->nbeal)) {
+		amd64_mc_printk(mci, KERN_WARNING,
+				"hardware STATUS read access race condition "
+				"detected!\n");
+		return 0;
+	}
+	return 1;
+}
+
+/*
+ * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
+ * ADDRESS and process.
+ */
+static void amd64_handle_ce(struct mem_ctl_info *mci,
+			    struct err_regs *info)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u64 SystemAddress;
+
+	/* Ensure that the Error Address is VALID */
+	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
+		amd64_mc_printk(mci, KERN_ERR,
+			"HW has no ERROR_ADDRESS available\n");
+		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+		return;
+	}
+
+	SystemAddress = extract_error_address(mci, info);
+
+	amd64_mc_printk(mci, KERN_ERR,
+		"CE ERROR_ADDRESS= 0x%llx\n", SystemAddress);
+
+	pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress);
+}
+
+/* Handle any Un-correctable Errors (UEs) */
+static void amd64_handle_ue(struct mem_ctl_info *mci,
+			    struct err_regs *info)
+{
+	int csrow;
+	u64 SystemAddress;
+	u32 page, offset;
+	struct mem_ctl_info *log_mci, *src_mci = NULL;
+
+	log_mci = mci;
+
+	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
+		amd64_mc_printk(mci, KERN_CRIT,
+			"HW has no ERROR_ADDRESS available\n");
+		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+		return;
+	}
+
+	SystemAddress = extract_error_address(mci, info);
+
+	/*
+	 * Find out which node the error address belongs to. This may be
+	 * different from the node that detected the error.
+	 */
+	src_mci = find_mc_by_sys_addr(mci, SystemAddress);
+	if (!src_mci) {
+		amd64_mc_printk(mci, KERN_CRIT,
+			"ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
+			(unsigned long)SystemAddress);
+		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+		return;
+	}
+
+	log_mci = src_mci;
+
+	csrow = sys_addr_to_csrow(log_mci, SystemAddress);
+	if (csrow < 0) {
+		amd64_mc_printk(mci, KERN_CRIT,
+			"ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
+			(unsigned long)SystemAddress);
+		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+	} else {
+		error_address_to_page_and_offset(SystemAddress, &page, &offset);
+		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
+	}
+}
+
+static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
+					    struct err_regs *info)
+{
+	u32 ec  = ERROR_CODE(info->nbsl);
+	u32 xec = EXT_ERROR_CODE(info->nbsl);
+	int ecc_type = (info->nbsh >> 13) & 0x3;
+
+	/* Bail out early if this was an 'observed' error */
+	if (PP(ec) == K8_NBSL_PP_OBS)
+		return;
+
+	/* Do only ECC errors */
+	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
+		return;
+
+	if (ecc_type == 2)
+		amd64_handle_ce(mci, info);
+	else if (ecc_type == 1)
+		amd64_handle_ue(mci, info);
+
+	/*
+	 * If main error is CE then overflow must be CE.  If main error is UE
+	 * then overflow is unknown.  We'll call the overflow a CE - if
+	 * panic_on_ue is set then we're already panic'ed and won't arrive
+	 * here. Else, then apparently someone doesn't think that UE's are
+	 * catastrophic.
+	 */
+	if (info->nbsh & K8_NBSH_OVERFLOW)
+		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR "Error Overflow");
+}
+
+void amd64_decode_bus_error(int node_id, struct err_regs *regs)
+{
+	struct mem_ctl_info *mci = mci_lookup[node_id];
+
+	__amd64_decode_bus_error(mci, regs);
+
+	/*
+	 * Check the UE bit of the NB status high register; if set, generate
+	 * some logs. If NOT a GART error, then process the event as a NO-INFO
+	 * event. If it was a GART error, skip that processing.
+	 *
+	 * FIXME: this should go somewhere else, if at all.
+	 */
+	if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
+		edac_mc_handle_ue_no_info(mci, "UE bit is set");
+
+}
+
+/*
+ * The main polling 'check' function, called FROM the edac core to perform
+ * error checking and, if an error is encountered, error processing.
+ */
+static void amd64_check(struct mem_ctl_info *mci)
+{
+	struct err_regs regs;
+
+	if (amd64_get_error_info(mci, &regs)) {
+		struct amd64_pvt *pvt = mci->pvt_info;
+		amd_decode_nb_mce(pvt->mc_node_id, &regs, 1);
+	}
+}
+
+/*
+ * Input:
+ *	1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
+ *	2) AMD Family index value
+ *
+ * Output:
+ *	Upon return of 0, the following are filled in:
+ *
+ *		pvt->addr_f1_ctl
+ *		pvt->misc_f3_ctl
+ *
+ *	These are the related device functions of 'dram_f2_ctl', and they are
+ *	"reserved" via pci_get_device().
+ *
+ *	Upon return of 1 (error status):
+ *
+ *		Nothing reserved
+ */
+static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
+{
+	const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];
+
+	/* Reserve the ADDRESS MAP Device */
+	pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
+						    amd64_dev->addr_f1_ctl,
+						    pvt->dram_f2_ctl);
+
+	if (!pvt->addr_f1_ctl) {
+		amd64_printk(KERN_ERR, "error address map device not found: "
+			     "vendor %x device 0x%x (broken BIOS?)\n",
+			     PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
+		return 1;
+	}
+
+	/* Reserve the MISC Device */
+	pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
+						    amd64_dev->misc_f3_ctl,
+						    pvt->dram_f2_ctl);
+
+	if (!pvt->misc_f3_ctl) {
+		pci_dev_put(pvt->addr_f1_ctl);
+		pvt->addr_f1_ctl = NULL;
+
+		amd64_printk(KERN_ERR, "error miscellaneous device not found: "
+			     "vendor %x device 0x%x (broken BIOS?)\n",
+			     PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
+		return 1;
+	}
+
+	debugf1("    Addr Map device PCI Bus ID:\t%s\n",
+		pci_name(pvt->addr_f1_ctl));
+	debugf1("    DRAM MEM-CTL PCI Bus ID:\t%s\n",
+		pci_name(pvt->dram_f2_ctl));
+	debugf1("    Misc device PCI Bus ID:\t%s\n",
+		pci_name(pvt->misc_f3_ctl));
+
+	return 0;
+}
+
+static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
+{
+	pci_dev_put(pvt->addr_f1_ctl);
+	pci_dev_put(pvt->misc_f3_ctl);
+}
+
+/*
+ * Retrieve the hardware registers of the memory controller (this includes the
+ * 'Address Map' and 'Misc' device regs)
+ */
+static void amd64_read_mc_registers(struct amd64_pvt *pvt)
+{
+	u64 msr_val;
+	int dram, err = 0;
+
+	/*
+	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
+	 * those are Read-As-Zero
+	 */
+	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
+	debugf0("  TOP_MEM=0x%016llx\n", pvt->top_mem);
+
+	/* check first whether TOP_MEM2 is enabled */
+	rdmsrl(MSR_K8_SYSCFG, msr_val);
+	if (msr_val & (1U << 21)) {
+		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
+		debugf0("  TOP_MEM2=0x%016llx\n", pvt->top_mem2);
+	} else
+		debugf0("  TOP_MEM2 disabled.\n");
+
+	amd64_cpu_display_info(pvt);
+
+	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
+	if (err)
+		goto err_reg;
+
+	if (pvt->ops->read_dram_ctl_register)
+		pvt->ops->read_dram_ctl_register(pvt);
+
+	for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
+		/*
+		 * Call CPU specific READ function to get the DRAM Base and
+		 * Limit values from the DCT.
+		 */
+		pvt->ops->read_dram_base_limit(pvt, dram);
+
+		/*
+		 * Only print out debug info on rows with both R and W enabled.
+		 * In normal processing, the compiler should optimize this
+		 * whole 'if' debug output block away.
+		 */
+		if (pvt->dram_rw_en[dram] != 0) {
+			debugf1("  DRAM_BASE[%d]: 0x%8.08x-%8.08x "
+				"DRAM_LIMIT:  0x%8.08x-%8.08x\n",
+				dram,
+				(u32)(pvt->dram_base[dram] >> 32),
+				(u32)(pvt->dram_base[dram] & 0xFFFFFFFF),
+				(u32)(pvt->dram_limit[dram] >> 32),
+				(u32)(pvt->dram_limit[dram] & 0xFFFFFFFF));
+			debugf1("        IntlvEn=%s %s %s "
+				"IntlvSel=%d DstNode=%d\n",
+				pvt->dram_IntlvEn[dram] ?
+					"Enabled" : "Disabled",
+				(pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
+				(pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
+				pvt->dram_IntlvSel[dram],
+				pvt->dram_DstNode[dram]);
+		}
+	}
+
+	amd64_read_dct_base_mask(pvt);
+
+	err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
+	if (err)
+		goto err_reg;
+
+	amd64_read_dbam_reg(pvt);
+
+	err = pci_read_config_dword(pvt->misc_f3_ctl,
+				F10_ONLINE_SPARE, &pvt->online_spare);
+	if (err)
+		goto err_reg;
+
+	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+	if (err)
+		goto err_reg;
+
+	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
+	if (err)
+		goto err_reg;
+
+	if (!dct_ganging_enabled(pvt)) {
+		err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1,
+						&pvt->dclr1);
+		if (err)
+			goto err_reg;
+
+		err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_1,
+						&pvt->dchr1);
+		if (err)
+			goto err_reg;
+	}
+
+	amd64_dump_misc_regs(pvt);
+
+	return;
+
+err_reg:
+	debugf0("Reading an MC register failed\n");
+
+}
+
+/*
+ * NOTE: CPU Revision Dependent code
+ *
+ * Input:
+ *	@csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
+ *	k8 private pointer to -->
+ *			DRAM Bank Address mapping register
+ *			node_id
+ *			DCL register where dual_channel_active is
+ *
+ * The DBAM register consists of 4 sets of 4 bits, each defined as follows:
+ *
+ * Bits:	CSROWs
+ * 0-3		CSROWs 0 and 1
+ * 4-7		CSROWs 2 and 3
+ * 8-11		CSROWs 4 and 5
+ * 12-15	CSROWs 6 and 7
+ *
+ * Values range from 0 to 15.
+ * The meaning of the values depends on CPU revision and dual-channel state;
+ * see the relevant BKDG for more info.
+ *
+ * The memory controller provides for a total of only 8 CSROWs in its current
+ * architecture. Each "pair" of CSROWs normally represents just one DIMM in
+ * single-channel mode or two (2) DIMMs in dual-channel mode.
+ *
+ * The following code logic collapses the various tables for CSROW based on CPU
+ * revision.
+ *
+ * Returns:
+ *	The number of PAGE_SIZE pages on the specified CSROW number it
+ *	encompasses
+ *
+ */
+static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
+{
+	u32 dram_map, nr_pages;
+
+	/*
+	 * The math on this doesn't look right on the surface because x/2*4 can
+	 * be simplified to x*2, but this expression makes use of the fact that
+	 * it is integer math, where 1/2 = 0. This intermediate value becomes
+	 * the number of bits to shift the DBAM register by to extract the
+	 * proper CSROW field.
+	 */
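+	/* E.g.: csrow 5 -> (5/2)*4 = 8, i.e. DBAM bits [11:8]. */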
+	dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
+
+	nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map);
+
+	/*
+	 * If dual channel then double the memory size of single channel.
+	 * Channel count is 1 or 2
+	 */
+	nr_pages <<= (pvt->channel_count - 1);
+
+	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map);
+	debugf0("    nr_pages= %u  channel-count = %d\n",
+		nr_pages, pvt->channel_count);
+
+	return nr_pages;
+}
+
+/*
+ * Initialize the array of csrow attribute instances, based on the values
+ * from pci config hardware registers.
+ */
+static int amd64_init_csrows(struct mem_ctl_info *mci)
+{
+	struct csrow_info *csrow;
+	struct amd64_pvt *pvt;
+	u64 input_addr_min, input_addr_max, sys_addr;
+	int i, err = 0, empty = 1;
+
+	pvt = mci->pvt_info;
+
+	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
+	if (err)
+		debugf0("Reading K8_NBCFG failed\n");
+
+	debugf0("NBCFG= 0x%x  CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
+		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
+		);
+
+	for (i = 0; i < pvt->cs_count; i++) {
+		csrow = &mci->csrows[i];
+
+		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
+			debugf1("----CSROW %d EMPTY for node %d\n", i,
+				pvt->mc_node_id);
+			continue;
+		}
+
+		debugf1("----CSROW %d VALID for MC node %d\n",
+			i, pvt->mc_node_id);
+
+		empty = 0;
+		csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
+		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
+		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
+		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
+		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
+		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
+		/* 8 bytes of resolution */
+
+		csrow->mtype = amd64_determine_memory_type(pvt);
+
+		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
+		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
+			(unsigned long)input_addr_min,
+			(unsigned long)input_addr_max);
+		debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
+			(unsigned long)sys_addr, csrow->page_mask);
+		debugf1("    nr_pages: %u  first_page: 0x%lx "
+			"last_page: 0x%lx\n",
+			(unsigned)csrow->nr_pages,
+			csrow->first_page, csrow->last_page);
+
+		/*
+		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
+		 */
+		if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
+			csrow->edac_mode =
+			    (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
+			    EDAC_S4ECD4ED : EDAC_SECDED;
+		else
+			csrow->edac_mode = EDAC_NONE;
+	}
+
+	return empty;
+}
+
+/* get all cores on this DCT */
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		if (amd_get_nb_id(cpu) == nid)
+			cpumask_set_cpu(cpu, mask);
+}
+
+/* check MCG_CTL on all the cpus on this node */
+static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+{
+	cpumask_var_t mask;
+	int cpu, nbe;
+	bool ret = false;
+
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+			     __func__);
+		return false;
+	}
+
+	get_cpus_on_this_dct_cpumask(mask, nid);
+
+	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
+
+	for_each_cpu(cpu, mask) {
+		struct msr *reg = per_cpu_ptr(msrs, cpu);
+		nbe = reg->l & K8_MSR_MCGCTL_NBE;
+
+		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+			cpu, reg->q,
+			(nbe ? "enabled" : "disabled"));
+
+		if (!nbe)
+			goto out;
+	}
+	ret = true;
+
+out:
+	free_cpumask_var(mask);
+	return ret;
+}
+
+static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
+{
+	cpumask_var_t cmask;
+	int cpu;
+
+	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
+		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+			     __func__);
+		return false;
+	}
+
+	get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
+
+	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+	for_each_cpu(cpu, cmask) {
+
+		struct msr *reg = per_cpu_ptr(msrs, cpu);
+
+		if (on) {
+			if (reg->l & K8_MSR_MCGCTL_NBE)
+				pvt->flags.ecc_report = 1;
+
+			reg->l |= K8_MSR_MCGCTL_NBE;
+		} else {
+			/*
+			 * Turn off ECC reporting only when it was off before
+			 */
+			if (!pvt->flags.ecc_report)
+				reg->l &= ~K8_MSR_MCGCTL_NBE;
+		}
+	}
+	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+	free_cpumask_var(cmask);
+
+	return 0;
+}
+
+/*
+ * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
+ * enable it.
+ */
+static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	int err = 0;
+	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+
+	if (!ecc_enable_override)
+		return;
+
+	amd64_printk(KERN_WARNING,
+		"'ecc_enable_override' parameter is active, "
+		"Enabling AMD ECC hardware now: CAUTION\n");
+
+	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
+	if (err)
+		debugf0("Reading K8_NBCTL failed\n");
+
+	/* turn on UECCn and CECCEn bits */
+	pvt->old_nbctl = value & mask;
+	pvt->nbctl_mcgctl_saved = 1;
+
+	value |= mask;
+	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+
+	if (amd64_toggle_ecc_err_reporting(pvt, ON))
+		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
+					   "MCGCTL!\n");
+
+	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+	if (err)
+		debugf0("Reading K8_NBCFG failed\n");
+
+	debugf0("NBCFG(1)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
+		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+
+	if (!(value & K8_NBCFG_ECC_ENABLE)) {
+		amd64_printk(KERN_WARNING,
+			"This node reports that DRAM ECC is "
+			"currently Disabled; ENABLING now\n");
+
+		/* Attempt to turn on DRAM ECC Enable */
+		value |= K8_NBCFG_ECC_ENABLE;
+		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+
+		err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+		if (err)
+			debugf0("Reading K8_NBCFG failed\n");
+
+		if (!(value & K8_NBCFG_ECC_ENABLE)) {
+			amd64_printk(KERN_WARNING,
+				"Hardware rejects Enabling DRAM ECC checking\n"
+				"Check memory DIMM configuration\n");
+		} else {
+			amd64_printk(KERN_DEBUG,
+				"Hardware accepted DRAM ECC Enable\n");
+		}
+	}
+	debugf0("NBCFG(2)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
+		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+
+	pvt->ctl_error_info.nbcfg = value;
+}
+
+static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
+{
+	int err = 0;
+	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+
+	if (!pvt->nbctl_mcgctl_saved)
+		return;
+
+	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
+	if (err)
+		debugf0("Reading K8_NBCTL failed\n");
+	value &= ~mask;
+	value |= pvt->old_nbctl;
+
+	/* restore the NB Enable MCGCTL bit */
+	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+
+	if (amd64_toggle_ecc_err_reporting(pvt, OFF))
+		amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
+					   "MCGCTL!\n");
+}
+
+/*
+ * EDAC requires that the BIOS have ECC enabled before taking over the
+ * processing of ECC errors. This is because the BIOS can properly initialize
+ * the memory system completely. A command line option allows force-enabling
+ * hardware ECC later in amd64_enable_ecc_error_reporting().
+ */
+static const char *ecc_msg =
+	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
+	" Either enable ECC checking or force module loading by setting "
+	"'ecc_enable_override'.\n"
+	" (Note that use of the override may cause unknown side effects.)\n";
+
+static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
+{
+	u32 value;
+	int err = 0;
+	u8 ecc_enabled = 0;
+	bool nb_mce_en = false;
+
+	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+	if (err)
+		debugf0("Reading K8_NBCTL failed\n");
+
+	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
+	if (!ecc_enabled)
+		amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
+			     "is currently disabled, set F3x%x[22] (%s).\n",
+			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
+	else
+		amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");
+
+	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
+	if (!nb_mce_en)
+		amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
+			     "0x%08x[4] on node %d to enable.\n",
+			     MSR_IA32_MCG_CTL, pvt->mc_node_id);
+
+	if (!ecc_enabled || !nb_mce_en) {
+		if (!ecc_enable_override) {
+			amd64_printk(KERN_NOTICE, "%s", ecc_msg);
+			return -ENODEV;
+		}
+		ecc_enable_override = 0;
+	}
+
+	return 0;
+}
+
+struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
+					  ARRAY_SIZE(amd64_inj_attrs) +
+					  1];
+
+struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
+
+static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+{
+	unsigned int i = 0, j = 0;
+
+	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
+		sysfs_attrs[i] = amd64_dbg_attrs[i];
+
+	for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
+		sysfs_attrs[i] = amd64_inj_attrs[j];
+
+	sysfs_attrs[i] = terminator;
+
+	mci->mc_driver_sysfs_attributes = sysfs_attrs;
+}
+
+static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+
+	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
+	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
+
+	if (pvt->nbcap & K8_NBCAP_SECDED)
+		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+
+	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
+		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+
+	mci->edac_cap		= amd64_determine_edac_cap(pvt);
+	mci->mod_name		= EDAC_MOD_STR;
+	mci->mod_ver		= EDAC_AMD64_VERSION;
+	mci->ctl_name		= get_amd_family_name(pvt->mc_type_index);
+	mci->dev_name		= pci_name(pvt->dram_f2_ctl);
+	mci->ctl_page_to_phys	= NULL;
+
+	/* IMPORTANT: Set the polling 'check' function in this module */
+	mci->edac_check		= amd64_check;
+
+	/* memory scrubber interface */
+	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
+	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
+}
+
+/*
+ * Init stuff for this DRAM Controller device.
+ *
+ * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
+ * Space feature MUST be enabled on ALL Processors prior to actually reading
+ * from the ECS registers, since the module can be loaded on any 'core' and
+ * a core doesn't 'see' the other processors' ECS data while ECS access is
+ * not enabled on them. Our solution is to first enable ECS access in this
+ * routine on all processors, gather some data into an amd64_pvt structure,
+ * and later come back in a finish-setup function to perform the final
+ * initialization. See also amd64_init_2nd_stage() for that.
+ */
+static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
+				    int mc_type_index)
+{
+	struct amd64_pvt *pvt = NULL;
+	int err = 0, ret;
+
+	ret = -ENOMEM;
+	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
+	if (!pvt)
+		goto err_exit;
+
+	pvt->mc_node_id = get_node_id(dram_f2_ctl);
+
+	pvt->dram_f2_ctl	= dram_f2_ctl;
+	pvt->ext_model		= boot_cpu_data.x86_model >> 4;
+	pvt->mc_type_index	= mc_type_index;
+	pvt->ops		= family_ops(mc_type_index);
+
+	/*
+	 * We have the dram_f2_ctl device as an argument, now go reserve its
+	 * sibling devices from the PCI system.
+	 */
+	ret = -ENODEV;
+	err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
+	if (err)
+		goto err_free;
+
+	ret = -EINVAL;
+	err = amd64_check_ecc_enabled(pvt);
+	if (err)
+		goto err_put;
+
+	/*
+	 * Key operation here: setup of HW prior to performing ops on it. Some
+	 * setup is required to access ECS data. After this is performed, the
+	 * 'teardown' function must be called upon error and normal exit paths.
+	 */
+	if (boot_cpu_data.x86 >= 0x10)
+		amd64_setup(pvt);
+
+	/*
+	 * Save the pointer to the private data for use in 2nd initialization
+	 * stage
+	 */
+	pvt_lookup[pvt->mc_node_id] = pvt;
+
+	return 0;
+
+err_put:
+	amd64_free_mc_sibling_devices(pvt);
+
+err_free:
+	kfree(pvt);
+
+err_exit:
+	return ret;
+}
+
+/*
+ * This is the finishing stage of the init code. It needs to be performed after
+ * all MCs' hardware has been prepped for accessing extended config space.
+ */
+static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
+{
+	int node_id = pvt->mc_node_id;
+	struct mem_ctl_info *mci;
+	int ret, err = 0;
+
+	amd64_read_mc_registers(pvt);
+
+	ret = -ENODEV;
+	if (pvt->ops->probe_valid_hardware) {
+		err = pvt->ops->probe_valid_hardware(pvt);
+		if (err)
+			goto err_exit;
+	}
+
+	/*
+	 * We need to determine how many memory channels there are. Then use
+	 * that information for calculating the size of the dynamic instance
+	 * tables in the 'mci' structure
+	 */
+	pvt->channel_count = pvt->ops->early_channel_count(pvt);
+	if (pvt->channel_count < 0)
+		goto err_exit;
+
+	ret = -ENOMEM;
+	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
+	if (!mci)
+		goto err_exit;
+
+	mci->pvt_info = pvt;
+
+	mci->dev = &pvt->dram_f2_ctl->dev;
+	amd64_setup_mci_misc_attributes(mci);
+
+	if (amd64_init_csrows(mci))
+		mci->edac_cap = EDAC_FLAG_NONE;
+
+	amd64_enable_ecc_error_reporting(mci);
+	amd64_set_mc_sysfs_attributes(mci);
+
+	ret = -ENODEV;
+	if (edac_mc_add_mc(mci)) {
+		debugf1("failed edac_mc_add_mc()\n");
+		goto err_add_mc;
+	}
+
+	mci_lookup[node_id] = mci;
+	pvt_lookup[node_id] = NULL;
+
+	/* register stuff with EDAC MCE */
+	if (report_gart_errors)
+		amd_report_gart_errors(true);
+
+	amd_register_ecc_decoder(amd64_decode_bus_error);
+
+	return 0;
+
+err_add_mc:
+	edac_mc_free(mci);
+
+err_exit:
+	debugf0("failure to init 2nd stage: ret=%d\n", ret);
+
+	amd64_restore_ecc_error_reporting(pvt);
+
+	if (boot_cpu_data.x86 > 0xf)
+		amd64_teardown(pvt);
+
+	amd64_free_mc_sibling_devices(pvt);
+
+	kfree(pvt_lookup[pvt->mc_node_id]);
+	pvt_lookup[node_id] = NULL;
+
+	return ret;
+}
+
+
+static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
+				 const struct pci_device_id *mc_type)
+{
+	int ret = 0;
+
+	debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
+		get_amd_family_name(mc_type->driver_data));
+
+	ret = pci_enable_device(pdev);
+	if (ret < 0)
+		ret = -EIO;
+	else
+		ret = amd64_probe_one_instance(pdev, mc_type->driver_data);
+
+	if (ret < 0)
+		debugf0("ret=%d\n", ret);
+
+	return ret;
+}
+
+static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+	struct amd64_pvt *pvt;
+
+	/* Remove from EDAC CORE tracking list */
+	mci = edac_mc_del_mc(&pdev->dev);
+	if (!mci)
+		return;
+
+	pvt = mci->pvt_info;
+
+	amd64_restore_ecc_error_reporting(pvt);
+
+	if (boot_cpu_data.x86 > 0xf)
+		amd64_teardown(pvt);
+
+	amd64_free_mc_sibling_devices(pvt);
+
+	/* unregister from EDAC MCE */
+	amd_report_gart_errors(false);
+	amd_unregister_ecc_decoder(amd64_decode_bus_error);
+
+	/* Free the EDAC CORE resources */
+	mci->pvt_info = NULL;
+	mci_lookup[pvt->mc_node_id] = NULL;
+
+	kfree(pvt);
+	edac_mc_free(mci);
+}
+
+/*
+ * This table is part of the interface for loading drivers for PCI devices. The
+ * PCI core identifies what devices are on a system during boot, and then
+ * queries this table to see whether this driver handles a given device found.
+ */
+static const struct pci_device_id amd64_pci_table[] __devinitdata = {
+	{
+		.vendor		= PCI_VENDOR_ID_AMD,
+		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.class		= 0,
+		.class_mask	= 0,
+		.driver_data	= K8_CPUS
+	},
+	{
+		.vendor		= PCI_VENDOR_ID_AMD,
+		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.class		= 0,
+		.class_mask	= 0,
+		.driver_data	= F10_CPUS
+	},
+	{
+		.vendor		= PCI_VENDOR_ID_AMD,
+		.device		= PCI_DEVICE_ID_AMD_11H_NB_DRAM,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.class		= 0,
+		.class_mask	= 0,
+		.driver_data	= F11_CPUS
+	},
+	{0, }
+};
+MODULE_DEVICE_TABLE(pci, amd64_pci_table);
+
+static struct pci_driver amd64_pci_driver = {
+	.name		= EDAC_MOD_STR,
+	.probe		= amd64_init_one_instance,
+	.remove		= __devexit_p(amd64_remove_one_instance),
+	.id_table	= amd64_pci_table,
+};
+
+static void amd64_setup_pci_device(void)
+{
+	struct mem_ctl_info *mci;
+	struct amd64_pvt *pvt;
+
+	if (amd64_ctl_pci)
+		return;
+
+	mci = mci_lookup[0];
+	if (mci) {
+
+		pvt = mci->pvt_info;
+		amd64_ctl_pci =
+			edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
+						    EDAC_MOD_STR);
+
+		if (!amd64_ctl_pci) {
+			pr_warning("%s(): Unable to create PCI control\n",
+				   __func__);
+
+			pr_warning("%s(): PCI error report via EDAC not set\n",
+				   __func__);
+		}
+	}
+}
+
+static int __init amd64_edac_init(void)
+{
+	int nb, err = -ENODEV;
+	bool load_ok = false;
+
+	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+
+	opstate_init();
+
+	if (cache_k8_northbridges() < 0)
+		goto err_ret;
+
+	msrs = msrs_alloc();
+	if (!msrs)
+		goto err_ret;
+
+	err = pci_register_driver(&amd64_pci_driver);
+	if (err)
+		goto err_pci;
+
+	/*
+	 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
+	 * amd64_pvt structs. These will be used in the 2nd stage init function
+	 * to finish initialization of the MC instances.
+	 */
+	err = -ENODEV;
+	for (nb = 0; nb < num_k8_northbridges; nb++) {
+		if (!pvt_lookup[nb])
+			continue;
+
+		err = amd64_init_2nd_stage(pvt_lookup[nb]);
+		if (err)
+			goto err_2nd_stage;
+
+		load_ok = true;
+	}
+
+	if (load_ok) {
+		amd64_setup_pci_device();
+		return 0;
+	}
+
+err_2nd_stage:
+	pci_unregister_driver(&amd64_pci_driver);
+err_pci:
+	msrs_free(msrs);
+	msrs = NULL;
+err_ret:
+	return err;
+}
+
+static void __exit amd64_edac_exit(void)
+{
+	if (amd64_ctl_pci)
+		edac_pci_release_generic_ctl(amd64_ctl_pci);
+
+	pci_unregister_driver(&amd64_pci_driver);
+
+	msrs_free(msrs);
+	msrs = NULL;
+}
+
+module_init(amd64_edac_init);
+module_exit(amd64_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
+		"Dave Peterson, Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
+		EDAC_AMD64_VERSION);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
new file mode 100644
index 0000000..bba6c94
--- /dev/null
+++ b/drivers/edac/amd64_edac.h
@@ -0,0 +1,577 @@
+/*
+ * AMD64 class Memory Controller kernel module
+ *
+ * Copyright (c) 2009 SoftwareBitMaker.
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ *	Originally Written by Thayne Harbaugh
+ *
+ *      Changes by Douglas "norsk" Thompson  <dougthompson@xmission.com>:
+ *		- K8 CPU Revision D and greater support
+ *
+ *      Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
+ *		- Module largely rewritten, with new (and hopefully correct)
+ *		code for dealing with node and chip select interleaving,
+ *		various code cleanup, and bug fixes
+ *		- Added support for memory hoisting using DRAM hole address
+ *		register
+ *
+ *	Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
+ *		-K8 Rev (1207) revision support added, required Revision
+ *		specific mini-driver code to support Rev F as well as
+ *		prior revisions
+ *
+ *	Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
+ *		-Family 10h revision support added. New PCI Device IDs,
+ *		indicating new changes. Actual registers modified
+ *		were slight, less than the Rev E to Rev F transition
+ *		but changing the PCI Device ID was the proper thing to
+ *		do, as it provides for almost automatic family
+ *		detection. The mods to Rev F required more family
+ *		information detection.
+ *
+ *	Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
+ *		- misc fixes and code cleanups
+ *
+ * This module is based on the following documents
+ * (available from http://www.amd.com/):
+ *
+ *	Title:	BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
+ *		Opteron Processors
+ *	AMD publication #: 26094
+ *	Revision: 3.26
+ *
+ *	Title:	BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
+ *		Processors
+ *	AMD publication #: 32559
+ *	Revision: 3.00
+ *	Issue Date: May 2006
+ *
+ *	Title:	BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
+ *		Processors
+ *	AMD publication #: 31116
+ *	Revision: 3.00
+ *	Issue Date: September 07, 2007
+ *
+ * Sections in the first 2 documents are no longer in sync with each other.
+ * The Family 10h BKDG was totally re-written from scratch with a new
+ * presentation model.
+ * Therefore, comments that refer to a Document section might be off.
+ */
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/mmzone.h>
+#include <linux/edac.h>
+#include <asm/msr.h>
+#include "edac_core.h"
+#include "edac_mce_amd.h"
+
+#define amd64_printk(level, fmt, arg...) \
+	edac_printk(level, "amd64", fmt, ##arg)
+
+#define amd64_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
+
+/*
+ * Throughout the comments in this code, the following terms are used:
+ *
+ *	SysAddr, DramAddr, and InputAddr
+ *
+ *  These terms come directly from the amd64 documentation
+ * (AMD publication #26094).  They are defined as follows:
+ *
+ *     SysAddr:
+ *         This is a physical address generated by a CPU core or a device
+ *         doing DMA.  If generated by a CPU core, a SysAddr is the result of
+ *         a virtual to physical address translation by the CPU core's address
+ *         translation mechanism (MMU).
+ *
+ *     DramAddr:
+ *         A DramAddr is derived from a SysAddr by subtracting an offset that
+ *         depends on which node the SysAddr maps to and whether the SysAddr
+ *         is within a range affected by memory hoisting.  The DRAM Base
+ *         (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers
+ *         determine which node a SysAddr maps to.
+ *
+ *         If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr
+ *         is within the range of addresses specified by this register, then
+ *         a value x from the DHAR is subtracted from the SysAddr to produce a
+ *         DramAddr.  Here, x represents the base address for the node that
+ *         the SysAddr maps to plus an offset due to memory hoisting.  See
+ *         section 3.4.8 and the comments in amd64_get_dram_hole_info() and
+ *         sys_addr_to_dram_addr() below for more information.
+ *
+ *         If the SysAddr is not affected by the DHAR then a value y is
+ *         subtracted from the SysAddr to produce a DramAddr.  Here, y is the
+ *         base address for the node that the SysAddr maps to.  See section
+ *         3.4.4 and the comments in sys_addr_to_dram_addr() below for more
+ *         information.
+ *
+ *     InputAddr:
+ *         A DramAddr is translated to an InputAddr before being passed to the
+ *         memory controller for the node that the DramAddr is associated
+ *         with.  The memory controller then maps the InputAddr to a csrow.
+ *         If node interleaving is not in use, then the InputAddr has the same
+ *         value as the DramAddr.  Otherwise, the InputAddr is produced by
+ *         discarding the bits used for node interleaving from the DramAddr.
+ *         See section 3.4.4 for more information.
+ *
+ *         The memory controller for a given node uses its DRAM CS Base and
+ *         DRAM CS Mask registers to map an InputAddr to a csrow.  See
+ *         sections 3.5.4 and 3.5.5 for more information.
+ */
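+
+/*
+ * Putting the three together (illustrative numbers only): a SysAddr of
+ * 0x100001000 on a node whose DRAM Base is 0x100000000 yields a DramAddr of
+ * 0x1000; with node interleaving off, the InputAddr is the same 0x1000, and
+ * the DRAM CS Base/Mask registers then select the csrow it falls in.
+ */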
+
+#define EDAC_AMD64_VERSION		" Ver: 3.2.0 " __DATE__
+#define EDAC_MOD_STR			"amd64_edac"
+
+#define EDAC_MAX_NUMNODES		8
+
+/* Extended Model from CPUID, for CPU Revision numbers */
+#define OPTERON_CPU_LE_REV_C		0
+#define OPTERON_CPU_REV_D		1
+#define OPTERON_CPU_REV_E		2
+
+/* NPT processors have the following Extended Models */
+#define OPTERON_CPU_REV_F		4
+#define OPTERON_CPU_REV_FA		5
+
+/* Hardware limit on ChipSelect rows per MC and processors per system */
+#define MAX_CS_COUNT			8
+#define DRAM_REG_COUNT			8
+
+#define ON true
+#define OFF false
+
+/*
+ * PCI-defined configuration space registers
+ */
+
+
+/*
+ * Function 1 - Address Map
+ */
+#define K8_DRAM_BASE_LOW		0x40
+#define K8_DRAM_LIMIT_LOW		0x44
+#define K8_DHAR				0xf0
+
+#define DHAR_VALID			BIT(0)
+#define F10_DRAM_MEM_HOIST_VALID	BIT(1)
+
+#define DHAR_BASE_MASK			0xff000000
+#define dhar_base(dhar)			((dhar) & DHAR_BASE_MASK)
+
+#define K8_DHAR_OFFSET_MASK		0x0000ff00
+#define k8_dhar_offset(dhar)		(((dhar) & K8_DHAR_OFFSET_MASK) << 16)
+
+#define F10_DHAR_OFFSET_MASK		0x0000ff80
+					/* NOTE: Extra mask bit vs K8 */
+#define f10_dhar_offset(dhar)		(((dhar) & F10_DHAR_OFFSET_MASK) << 16)
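+
+/*
+ * Worked example (illustration only): for a hypothetical K8 DHAR value
+ * of 0x02004101, dhar_base() keeps bits 31:24 and yields a DRAM hole
+ * base of 0x02000000, while k8_dhar_offset() takes bits 15:8 (0x4100)
+ * and shifts them up by 16 to give 0x41000000.  f10_dhar_offset() is
+ * identical except that its mask also keeps bit 7.
+ */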
+
+
+/* F10 High BASE/LIMIT registers */
+#define F10_DRAM_BASE_HIGH		0x140
+#define F10_DRAM_LIMIT_HIGH		0x144
+
+
+/*
+ * Function 2 - DRAM controller
+ */
+#define K8_DCSB0			0x40
+#define F10_DCSB1			0x140
+
+#define K8_DCSB_CS_ENABLE		BIT(0)
+#define K8_DCSB_NPT_SPARE		BIT(1)
+#define K8_DCSB_NPT_TESTFAIL		BIT(2)
+
+/*
+ * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
+ * the address
+ */
+#define REV_E_DCSB_BASE_BITS		(0xFFE0FE00ULL)
+#define REV_E_DCS_SHIFT			4
+
+#define REV_F_F1Xh_DCSB_BASE_BITS	(0x1FF83FE0ULL)
+#define REV_F_F1Xh_DCS_SHIFT		8
+
+/*
+ * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
+ * to form the address
+ */
+#define REV_F_DCSB_BASE_BITS		(0x1FF83FE0ULL)
+#define REV_F_DCS_SHIFT			8
+
+/* DRAM CS Mask Registers */
+#define K8_DCSM0			0x60
+#define F10_DCSM1			0x160
+
+/* REV E: select [29:21] and [15:9] from DCSM */
+#define REV_E_DCSM_MASK_BITS		0x3FE0FE00
+
+/* unused bits [24:20] and [12:0] */
+#define REV_E_DCS_NOTUSED_BITS		0x01F01FFF
+
+/* REV F and later: select [28:19] and [13:5] from DCSM */
+#define REV_F_F1Xh_DCSM_MASK_BITS	0x1FF83FE0
+
+/* unused bits [26:22] and [12:0] */
+#define REV_F_F1Xh_DCS_NOTUSED_BITS	0x07C01FFF
+
+#define DBAM0				0x80
+#define DBAM1				0x180
+
+/* Extract the 'type' field of the i'th DIMM from the DBAM reg value passed */
+#define DBAM_DIMM(i, reg)		(((reg) >> (4 * (i))) & 0xF)
+
+#define DBAM_MAX_VALUE			11
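+
+/*
+ * Worked example (illustration only): for a hypothetical DBAM value of
+ * 0x00003210, DBAM_DIMM(0, reg) == 0x0, DBAM_DIMM(1, reg) == 0x1,
+ * DBAM_DIMM(2, reg) == 0x2 and DBAM_DIMM(3, reg) == 0x3; each nibble is
+ * later fed to the per-family dbam_map_to_pages() to size a csrow.
+ */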
+
+
+#define F10_DCLR_0			0x90
+#define F10_DCLR_1			0x190
+#define REVE_WIDTH_128			BIT(16)
+#define F10_WIDTH_128			BIT(11)
+
+
+#define F10_DCHR_0			0x94
+#define F10_DCHR_1			0x194
+
+#define F10_DCHR_FOUR_RANK_DIMM		BIT(18)
+#define F10_DCHR_Ddr3Mode		BIT(8)
+#define F10_DCHR_MblMode		BIT(6)
+
+
+#define F10_DCTL_SEL_LOW		0x110
+
+#define dct_sel_baseaddr(pvt)    \
+	((pvt->dram_ctl_select_low) & 0xFFFFF800)
+
+#define dct_sel_interleave_addr(pvt)    \
+	(((pvt->dram_ctl_select_low) >> 6) & 0x3)
+
+enum {
+	F10_DCTL_SEL_LOW_DctSelHiRngEn	= BIT(0),
+	F10_DCTL_SEL_LOW_DctSelIntLvEn	= BIT(2),
+	F10_DCTL_SEL_LOW_DctGangEn	= BIT(4),
+	F10_DCTL_SEL_LOW_DctDatIntLv	= BIT(5),
+	F10_DCTL_SEL_LOW_DramEnable	= BIT(8),
+	F10_DCTL_SEL_LOW_MemCleared	= BIT(10),
+};
+
+#define dct_high_range_enabled(pvt)    \
+	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelHiRngEn)
+
+#define dct_interleave_enabled(pvt)	   \
+	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelIntLvEn)
+
+#define dct_ganging_enabled(pvt)        \
+	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctGangEn)
+
+#define dct_data_intlv_enabled(pvt)    \
+	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctDatIntLv)
+
+#define dct_dram_enabled(pvt)    \
+	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DramEnable)
+
+#define dct_memory_cleared(pvt)    \
+	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_MemCleared)
+
+
+#define F10_DCTL_SEL_HIGH		0x114
+
+
+/*
+ * Function 3 - Misc Control
+ */
+#define K8_NBCTL			0x40
+
+/* Correctable ECC error reporting enable */
+#define K8_NBCTL_CECCEn			BIT(0)
+
+/* UnCorrectable ECC error reporting enable */
+#define K8_NBCTL_UECCEn			BIT(1)
+
+#define K8_NBCFG			0x44
+#define K8_NBCFG_CHIPKILL		BIT(23)
+#define K8_NBCFG_ECC_ENABLE		BIT(22)
+
+#define K8_NBSL				0x48
+
+
+/* Family F10h: Normalized Extended Error Codes */
+#define F10_NBSL_EXT_ERR_RES		0x0
+#define F10_NBSL_EXT_ERR_ECC		0x8
+
+/* Next two are overloaded values */
+#define F10_NBSL_EXT_ERR_LINK_PROTO	0xB
+#define F10_NBSL_EXT_ERR_L3_PROTO	0xB
+
+#define F10_NBSL_EXT_ERR_NB_ARRAY	0xC
+#define F10_NBSL_EXT_ERR_DRAM_PARITY	0xD
+#define F10_NBSL_EXT_ERR_LINK_RETRY	0xE
+
+/* Next two are overloaded values */
+#define F10_NBSL_EXT_ERR_GART_WALK	0xF
+#define F10_NBSL_EXT_ERR_DEV_WALK	0xF
+
+/* 0x10 to 0x1B: Reserved */
+#define F10_NBSL_EXT_ERR_L3_DATA	0x1C
+#define F10_NBSL_EXT_ERR_L3_TAG		0x1D
+#define F10_NBSL_EXT_ERR_L3_LRU		0x1E
+
+/* K8: Normalized Extended Error Codes */
+#define K8_NBSL_EXT_ERR_ECC		0x0
+#define K8_NBSL_EXT_ERR_CRC		0x1
+#define K8_NBSL_EXT_ERR_SYNC		0x2
+#define K8_NBSL_EXT_ERR_MST		0x3
+#define K8_NBSL_EXT_ERR_TGT		0x4
+#define K8_NBSL_EXT_ERR_GART		0x5
+#define K8_NBSL_EXT_ERR_RMW		0x6
+#define K8_NBSL_EXT_ERR_WDT		0x7
+#define K8_NBSL_EXT_ERR_CHIPKILL_ECC	0x8
+#define K8_NBSL_EXT_ERR_DRAM_PARITY	0xD
+
+/*
+ * The following are for BUS type errors AFTER values have been normalized by
+ * shifting right
+ */
+#define K8_NBSL_PP_SRC			0x0
+#define K8_NBSL_PP_RES			0x1
+#define K8_NBSL_PP_OBS			0x2
+#define K8_NBSL_PP_GENERIC		0x3
+
+#define EXTRACT_ERR_CPU_MAP(x)		((x) & 0xF)
+
+#define K8_NBEAL			0x50
+#define K8_NBEAH			0x54
+#define K8_SCRCTRL			0x58
+
+#define F10_NB_CFG_LOW			0x88
+#define	F10_NB_CFG_LOW_ENABLE_EXT_CFG	BIT(14)
+
+#define F10_NB_CFG_HIGH			0x8C
+
+#define F10_ONLINE_SPARE		0xB0
+#define F10_ONLINE_SPARE_SWAPDONE0(x)	((x) & BIT(1))
+#define F10_ONLINE_SPARE_SWAPDONE1(x)	((x) & BIT(3))
+#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
+#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
+
+#define F10_NB_ARRAY_ADDR		0xB8
+
+#define F10_NB_ARRAY_DRAM_ECC		0x80000000
+
+/* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline  */
+#define SET_NB_ARRAY_ADDRESS(section)	(((section) & 0x3) << 1)
+
+#define F10_NB_ARRAY_DATA		0xBC
+
+#define SET_NB_DRAM_INJECTION_WRITE(word, bits)  \
+					(BIT(((word) & 0xF) + 20) | \
+					BIT(17) | bits)
+
+#define SET_NB_DRAM_INJECTION_READ(word, bits)  \
+					(BIT(((word) & 0xF) + 20) | \
+					BIT(16) |  bits)
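+
+/*
+ * Worked example (illustration only): to target section 1, word 2 with a
+ * hypothetical ECC vector of 0x0004,
+ *
+ *	SET_NB_ARRAY_ADDRESS(1)			== 0x00000002
+ *	SET_NB_DRAM_INJECTION_READ(2, 0x0004)	== BIT(22) | BIT(16) | 0x4
+ *						== 0x00410004
+ *
+ * i.e. one word-select bit in the [28:20] range, the read-enable BIT(16)
+ * (writes use BIT(17)) and the 16-bit ECC vector in [15:0].
+ */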
+
+#define K8_NBCAP			0xE8
+#define K8_NBCAP_CORES			(BIT(12)|BIT(13))
+#define K8_NBCAP_CHIPKILL		BIT(4)
+#define K8_NBCAP_SECDED			BIT(3)
+#define K8_NBCAP_8_NODE			BIT(2)
+#define K8_NBCAP_DUAL_NODE		BIT(1)
+#define K8_NBCAP_DCT_DUAL		BIT(0)
+
+/* MSRs */
+#define K8_MSR_MCGCTL_NBE		BIT(4)
+
+#define K8_MSR_MC4CTL			0x0410
+#define K8_MSR_MC4STAT			0x0411
+#define K8_MSR_MC4ADDR			0x0412
+
+/* AMD places the first memory controller node at PCI device (slot) 0x18. */
+static inline int get_node_id(struct pci_dev *pdev)
+{
+	return PCI_SLOT(pdev->devfn) - 0x18;
+}
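+
+/*
+ * Example (illustration only): the F1/F2/F3 functions of node 1 sit in
+ * PCI slot 0x19, so get_node_id() returns 0x19 - 0x18 == 1.
+ */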
+
+enum amd64_chipset_families {
+	K8_CPUS = 0,
+	F10_CPUS,
+	F11_CPUS,
+};
+
+/* Error injection control structure */
+struct error_injection {
+	u32	section;
+	u32	word;
+	u32	bit_map;
+};
+
+struct amd64_pvt {
+	/* pci_device handles which we utilize */
+	struct pci_dev *addr_f1_ctl;
+	struct pci_dev *dram_f2_ctl;
+	struct pci_dev *misc_f3_ctl;
+
+	int mc_node_id;		/* MC index of this MC node */
+	int ext_model;		/* extended model value of this node */
+
+	struct low_ops *ops;	/* pointer to per PCI Device ID func table */
+
+	int channel_count;
+
+	/* Raw registers */
+	u32 dclr0;		/* DRAM Configuration Low DCT0 reg */
+	u32 dclr1;		/* DRAM Configuration Low DCT1 reg */
+	u32 dchr0;		/* DRAM Configuration High DCT0 reg */
+	u32 dchr1;		/* DRAM Configuration High DCT1 reg */
+	u32 nbcap;		/* North Bridge Capabilities */
+	u32 nbcfg;		/* F10 North Bridge Configuration */
+	u32 ext_nbcfg;		/* Extended F10 North Bridge Configuration */
+	u32 dhar;		/* DRAM Hoist reg */
+	u32 dbam0;		/* DRAM Base Address Mapping reg for DCT0 */
+	u32 dbam1;		/* DRAM Base Address Mapping reg for DCT1 */
+
+	/* DRAM CS Base Address Registers F2x[1,0][5C:40] */
+	u32 dcsb0[MAX_CS_COUNT];
+	u32 dcsb1[MAX_CS_COUNT];
+
+	/* DRAM CS Mask Registers F2x[1,0][6C:60] */
+	u32 dcsm0[MAX_CS_COUNT];
+	u32 dcsm1[MAX_CS_COUNT];
+
+	/*
+	 * Decoded parts of DRAM BASE and LIMIT Registers
+	 * F1x[78,70,68,60,58,50,48,40]
+	 */
+	u64 dram_base[DRAM_REG_COUNT];
+	u64 dram_limit[DRAM_REG_COUNT];
+	u8  dram_IntlvSel[DRAM_REG_COUNT];
+	u8  dram_IntlvEn[DRAM_REG_COUNT];
+	u8  dram_DstNode[DRAM_REG_COUNT];
+	u8  dram_rw_en[DRAM_REG_COUNT];
+
+	/*
+	 * The following fields are set at (load) run time, after CPU revision
+	 * has been determined, since the dct_base and dct_mask registers vary
+	 * based on revision
+	 */
+	u32 dcsb_base;		/* DCSB base bits */
+	u32 dcsm_mask;		/* DCSM mask bits */
+	u32 cs_count;		/* num chip selects (== num DCSB registers) */
+	u32 num_dcsm;		/* Number of DCSM registers */
+	u32 dcs_mask_notused;	/* DCSM notused mask bits */
+	u32 dcs_shift;		/* DCSB and DCSM shift value */
+
+	u64 top_mem;		/* top of memory below 4GB */
+	u64 top_mem2;		/* top of memory above 4GB */
+
+	u32 dram_ctl_select_low;	/* DRAM Controller Select Low Reg */
+	u32 dram_ctl_select_high;	/* DRAM Controller Select High Reg */
+	u32 online_spare;               /* On-Line spare Reg */
+
+	/* temp storage for when input is received from sysfs */
+	struct err_regs ctl_error_info;
+
+	/* place to store error injection parameters prior to issue */
+	struct error_injection injection;
+
+	/* Save old hw register values before we modify them */
+	u32 nbctl_mcgctl_saved;		/* when true, the following two are valid */
+	u32 old_nbctl;
+
+	/* MC Type Index value: socket F vs Family 10h */
+	u32 mc_type_index;
+
+	/* misc settings */
+	struct flags {
+		unsigned long cf8_extcfg:1;
+		unsigned long ecc_report:1;
+	} flags;
+};
+
+struct scrubrate {
+	u32 scrubval;		/* bit pattern for scrub rate */
+	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
+};
+
+extern struct scrubrate scrubrates[23];
+extern u32 revf_quad_ddr2_shift[16];
+extern const char *tt_msgs[4];
+extern const char *ll_msgs[4];
+extern const char *rrrr_msgs[16];
+extern const char *to_msgs[2];
+extern const char *pp_msgs[4];
+extern const char *ii_msgs[4];
+extern const char *ext_msgs[32];
+extern const char *htlink_msgs[8];
+
+#ifdef CONFIG_EDAC_DEBUG
+#define NUM_DBG_ATTRS 9
+#else
+#define NUM_DBG_ATTRS 0
+#endif
+
+#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
+#define NUM_INJ_ATTRS 5
+#else
+#define NUM_INJ_ATTRS 0
+#endif
+
+extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
+				     amd64_inj_attrs[NUM_INJ_ATTRS];
+
+/*
+ * Each of the PCI Device ID types has its own set of hardware accessor
+ * functions and per-device encoding/decoding logic.
+ */
+struct low_ops {
+	int (*probe_valid_hardware)(struct amd64_pvt *pvt);
+	int (*early_channel_count)(struct amd64_pvt *pvt);
+
+	u64 (*get_error_address)(struct mem_ctl_info *mci,
+			struct err_regs *info);
+	void (*read_dram_base_limit)(struct amd64_pvt *pvt, int dram);
+	void (*read_dram_ctl_register)(struct amd64_pvt *pvt);
+	void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci,
+					struct err_regs *info,
+					u64 SystemAddr);
+	int (*dbam_map_to_pages)(struct amd64_pvt *pvt, int dram_map);
+};
+
+struct amd64_family_type {
+	const char *ctl_name;
+	u16 addr_f1_ctl;
+	u16 misc_f3_ctl;
+	struct low_ops ops;
+};
+
+static struct amd64_family_type amd64_family_types[];
+
+static inline const char *get_amd_family_name(int index)
+{
+	return amd64_family_types[index].ctl_name;
+}
+
+static inline struct low_ops *family_ops(int index)
+{
+	return &amd64_family_types[index].ops;
+}
+
+/*
+ * For future CPU versions, verify the following values as new 'slow' scrub
+ * rates appear and adjust the minimum rate value for each supported CPU.
+ */
+#define K8_MIN_SCRUB_RATE_BITS	0x0
+#define F10_MIN_SCRUB_RATE_BITS	0x5
+#define F11_MIN_SCRUB_RATE_BITS	0x6
+
+int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
+			     u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c
new file mode 100644
index 0000000..59cf2cf
--- /dev/null
+++ b/drivers/edac/amd64_edac_dbg.c
@@ -0,0 +1,255 @@
+#include "amd64_edac.h"
+
+/*
+ * Accept a hex value and store it into the virtual error register file
+ * fields nbeal and nbeah.  Assume the virtual error values have already
+ * been set for NBSL, NBSH and NBCFG.  Then proceed to map the error
+ * values to an MC, CSROW and CHANNEL.
+ */
+static ssize_t amd64_nbea_store(struct mem_ctl_info *mci, const char *data,
+				size_t count)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	unsigned long long value;
+	int ret = 0;
+
+	ret = strict_strtoull(data, 16, &value);
+	if (ret != -EINVAL) {
+		debugf0("received NBEA= 0x%llx\n", value);
+
+		/* place the value into the virtual error packet */
+		pvt->ctl_error_info.nbeal = (u32) value;
+		value >>= 32;
+		pvt->ctl_error_info.nbeah = (u32) value;
+
+		/* Process the Mapping request */
+		/* TODO: Add race prevention */
+		amd_decode_nb_mce(pvt->mc_node_id, &pvt->ctl_error_info, 1);
+
+		return count;
+	}
+	return ret;
+}
+
+/* display the last NBEA (MCA NB Address (MC4_ADDR)) value written */
+static ssize_t amd64_nbea_show(struct mem_ctl_info *mci, char *data)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u64 value;
+
+	value = pvt->ctl_error_info.nbeah;
+	value <<= 32;
+	value |= pvt->ctl_error_info.nbeal;
+
+	return sprintf(data, "%llx\n", value);
+}
+
+/* store the NBSL (MCA NB Status Low (MC4_STATUS)) value the user desires */
+static ssize_t amd64_nbsl_store(struct mem_ctl_info *mci, const char *data,
+				size_t count)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	unsigned long value;
+	int ret = 0;
+
+	ret = strict_strtoul(data, 16, &value);
+	if (ret != -EINVAL) {
+		debugf0("received NBSL= 0x%lx\n", value);
+
+		pvt->ctl_error_info.nbsl = (u32) value;
+
+		return count;
+	}
+	return ret;
+}
+
+/* display the last NBSL value written */
+static ssize_t amd64_nbsl_show(struct mem_ctl_info *mci, char *data)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u32 value;
+
+	value = pvt->ctl_error_info.nbsl;
+
+	return sprintf(data, "%x\n", value);
+}
+
+/* store the NBSH (MCA NB Status High) value the user desires */
+static ssize_t amd64_nbsh_store(struct mem_ctl_info *mci, const char *data,
+				size_t count)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	unsigned long value;
+	int ret = 0;
+
+	ret = strict_strtoul(data, 16, &value);
+	if (ret != -EINVAL) {
+		debugf0("received NBSH= 0x%lx\n", value);
+
+		pvt->ctl_error_info.nbsh = (u32) value;
+
+		return count;
+	}
+	return ret;
+}
+
+/* display the last NBSH value written */
+static ssize_t amd64_nbsh_show(struct mem_ctl_info *mci, char *data)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u32 value;
+
+	value = pvt->ctl_error_info.nbsh;
+
+	return sprintf(data, "%x\n", value);
+}
+
+/* accept and store the NBCFG (MCA NB Configuration) value the user desires */
+static ssize_t amd64_nbcfg_store(struct mem_ctl_info *mci,
+					const char *data, size_t count)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	unsigned long value;
+	int ret = 0;
+
+	ret = strict_strtoul(data, 16, &value);
+	if (ret != -EINVAL) {
+		debugf0("received NBCFG= 0x%lx\n", value);
+
+		pvt->ctl_error_info.nbcfg = (u32) value;
+
+		return count;
+	}
+	return ret;
+}
+
+/* various show routines for the controls of an MCI */
+static ssize_t amd64_nbcfg_show(struct mem_ctl_info *mci, char *data)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+
+	return sprintf(data, "%x\n", pvt->ctl_error_info.nbcfg);
+}
+
+
+static ssize_t amd64_dhar_show(struct mem_ctl_info *mci, char *data)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+
+	return sprintf(data, "%x\n", pvt->dhar);
+}
+
+
+static ssize_t amd64_dbam_show(struct mem_ctl_info *mci, char *data)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+
+	return sprintf(data, "%x\n", pvt->dbam0);
+}
+
+
+static ssize_t amd64_topmem_show(struct mem_ctl_info *mci, char *data)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+
+	return sprintf(data, "%llx\n", pvt->top_mem);
+}
+
+
+static ssize_t amd64_topmem2_show(struct mem_ctl_info *mci, char *data)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+
+	return sprintf(data, "%llx\n", pvt->top_mem2);
+}
+
+static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
+{
+	u64 hole_base = 0;
+	u64 hole_offset = 0;
+	u64 hole_size = 0;
+
+	amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
+
+	return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
+						 hole_size);
+}
+
+/*
+ * update NUM_DBG_ATTRS in case you add new members
+ */
+struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
+
+	{
+		.attr = {
+			.name = "nbea_ctl",
+			.mode = (S_IRUGO | S_IWUSR)
+		},
+		.show = amd64_nbea_show,
+		.store = amd64_nbea_store,
+	},
+	{
+		.attr = {
+			.name = "nbsl_ctl",
+			.mode = (S_IRUGO | S_IWUSR)
+		},
+		.show = amd64_nbsl_show,
+		.store = amd64_nbsl_store,
+	},
+	{
+		.attr = {
+			.name = "nbsh_ctl",
+			.mode = (S_IRUGO | S_IWUSR)
+		},
+		.show = amd64_nbsh_show,
+		.store = amd64_nbsh_store,
+	},
+	{
+		.attr = {
+			.name = "nbcfg_ctl",
+			.mode = (S_IRUGO | S_IWUSR)
+		},
+		.show = amd64_nbcfg_show,
+		.store = amd64_nbcfg_store,
+	},
+	{
+		.attr = {
+			.name = "dhar",
+			.mode = (S_IRUGO)
+		},
+		.show = amd64_dhar_show,
+		.store = NULL,
+	},
+	{
+		.attr = {
+			.name = "dbam",
+			.mode = (S_IRUGO)
+		},
+		.show = amd64_dbam_show,
+		.store = NULL,
+	},
+	{
+		.attr = {
+			.name = "topmem",
+			.mode = (S_IRUGO)
+		},
+		.show = amd64_topmem_show,
+		.store = NULL,
+	},
+	{
+		.attr = {
+			.name = "topmem2",
+			.mode = (S_IRUGO)
+		},
+		.show = amd64_topmem2_show,
+		.store = NULL,
+	},
+	{
+		.attr = {
+			.name = "dram_hole",
+			.mode = (S_IRUGO)
+		},
+		.show = amd64_hole_show,
+		.store = NULL,
+	},
+};
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
new file mode 100644
index 0000000..29f1f7a
--- /dev/null
+++ b/drivers/edac/amd64_edac_inj.c
@@ -0,0 +1,222 @@
+#include "amd64_edac.h"
+
+static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	return sprintf(buf, "0x%x\n", pvt->injection.section);
+}
+
+/*
+ * store the error injection section value, which refers to one of the
+ * four 16-byte sections within a 64-byte cacheline
+ *
+ * range: 0..3
+ */
+static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
+					  const char *data, size_t count)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	unsigned long value;
+	int ret = 0;
+
+	ret = strict_strtoul(data, 10, &value);
+	if (ret != -EINVAL) {
+
+		if (value > 3) {
+			amd64_printk(KERN_WARNING,
+				     "%s: invalid section 0x%lx\n",
+				     __func__, value);
+			return -EINVAL;
+		}
+
+		pvt->injection.section = (u32) value;
+		return count;
+	}
+	return ret;
+}
+
+static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	return sprintf(buf, "0x%x\n", pvt->injection.word);
+}
+
+/*
+ * store the error injection word value, which refers to one of the nine
+ * 16-bit words of the 16-byte (128-bit + ECC bits) section
+ *
+ * range: 0..8
+ */
+static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
+					const char *data, size_t count)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	unsigned long value;
+	int ret = 0;
+
+	ret = strict_strtoul(data, 10, &value);
+	if (ret != -EINVAL) {
+
+		if (value > 8) {
+			amd64_printk(KERN_WARNING,
+				     "%s: invalid word 0x%lx\n",
+				     __func__, value);
+			return -EINVAL;
+		}
+
+		pvt->injection.word = (u32) value;
+		return count;
+	}
+	return ret;
+}
+
+static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
+}
+
+/*
+ * store the 16-bit error injection vector that enables injecting errors
+ * into the corresponding bit within the error injection word above.  When
+ * used during a DRAM ECC read, it holds the contents of the DRAM ECC bits.
+ */
+static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
+					     const char *data, size_t count)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	unsigned long value;
+	int ret = 0;
+
+	ret = strict_strtoul(data, 16, &value);
+	if (ret != -EINVAL) {
+
+		if (value & 0xFFFF0000) {
+			amd64_printk(KERN_WARNING,
+				     "%s: invalid EccVector: 0x%lx\n",
+				     __func__, value);
+			return -EINVAL;
+		}
+
+		pvt->injection.bit_map = (u32) value;
+		return count;
+	}
+	return ret;
+}
+
+/*
+ * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
+ * fields needed by the injection registers and read the NB Array Data Port.
+ */
+static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
+					const char *data, size_t count)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	unsigned long value;
+	u32 section, word_bits;
+	int ret = 0;
+
+	ret = strict_strtoul(data, 10, &value);
+	if (ret != -EINVAL) {
+
+		/* Form value to choose 16-byte section of cacheline */
+		section = F10_NB_ARRAY_DRAM_ECC |
+				SET_NB_ARRAY_ADDRESS(pvt->injection.section);
+		pci_write_config_dword(pvt->misc_f3_ctl,
+					F10_NB_ARRAY_ADDR, section);
+
+		word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
+						pvt->injection.bit_map);
+
+		/* Issue 'word' and 'bit' along with the READ request */
+		pci_write_config_dword(pvt->misc_f3_ctl,
+					F10_NB_ARRAY_DATA, word_bits);
+
+		debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
+
+		return count;
+	}
+	return ret;
+}
+
+/*
+ * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
+ * fields needed by the injection registers.
+ */
+static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
+					const char *data, size_t count)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	unsigned long value;
+	u32 section, word_bits;
+	int ret = 0;
+
+	ret = strict_strtoul(data, 10, &value);
+	if (ret != -EINVAL) {
+
+		/* Form value to choose 16-byte section of cacheline */
+		section = F10_NB_ARRAY_DRAM_ECC |
+				SET_NB_ARRAY_ADDRESS(pvt->injection.section);
+		pci_write_config_dword(pvt->misc_f3_ctl,
+					F10_NB_ARRAY_ADDR, section);
+
+		word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
+						pvt->injection.bit_map);
+
+		/* Issue 'word' and 'bit' along with the WRITE request */
+		pci_write_config_dword(pvt->misc_f3_ctl,
+					F10_NB_ARRAY_DATA, word_bits);
+
+		debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
+
+		return count;
+	}
+	return ret;
+}
+
+/*
+ * update NUM_INJ_ATTRS in case you add new members
+ */
+struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
+
+	{
+		.attr = {
+			.name = "inject_section",
+			.mode = (S_IRUGO | S_IWUSR)
+		},
+		.show = amd64_inject_section_show,
+		.store = amd64_inject_section_store,
+	},
+	{
+		.attr = {
+			.name = "inject_word",
+			.mode = (S_IRUGO | S_IWUSR)
+		},
+		.show = amd64_inject_word_show,
+		.store = amd64_inject_word_store,
+	},
+	{
+		.attr = {
+			.name = "inject_ecc_vector",
+			.mode = (S_IRUGO | S_IWUSR)
+		},
+		.show = amd64_inject_ecc_vector_show,
+		.store = amd64_inject_ecc_vector_store,
+	},
+	{
+		.attr = {
+			.name = "inject_write",
+			.mode = (S_IRUGO | S_IWUSR)
+		},
+		.show = NULL,
+		.store = amd64_inject_write_store,
+	},
+	{
+		.attr = {
+			.name = "inject_read",
+			.mode = (S_IRUGO | S_IWUSR)
+		},
+		.show = NULL,
+		.store = amd64_inject_read_store,
+	},
+};
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
new file mode 100644
index 0000000..2b95f1a
--- /dev/null
+++ b/drivers/edac/amd76x_edac.c
@@ -0,0 +1,367 @@
+/*
+ * AMD 76x Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *	http://www.anime.net/~goemon/linux-ecc/
+ *
+ * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define AMD76X_REVISION	" Ver: 2.0.2 "  __DATE__
+#define EDAC_MOD_STR	"amd76x_edac"
+
+#define amd76x_printk(level, fmt, arg...) \
+	edac_printk(level, "amd76x", fmt, ##arg)
+
+#define amd76x_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
+
+#define AMD76X_NR_CSROWS 8
+#define AMD76X_NR_CHANS  1
+#define AMD76X_NR_DIMMS  4
+
+/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
+
+#define AMD76X_ECC_MODE_STATUS	0x48	/* Mode and status of ECC (32b)
+					 *
+					 * 31:16 reserved
+					 * 15:14 SERR enabled: x1=ue 1x=ce
+					 * 13    reserved
+					 * 12    diag: disabled, enabled
+					 * 11:10 mode: dis, EC, ECC, ECC+scrub
+					 *  9:8  status: x1=ue 1x=ce
+					 *  7:4  UE cs row
+					 *  3:0  CE cs row
+					 */
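+
+/*
+ * Worked example (illustration only): a hypothetical status value of
+ * 0x00000a03 decodes per the layout above as mode == ECC (bits 11:10 ==
+ * 0x2), a correctable error pending (bit 9 set) and CE chip-select row 3
+ * (bits 3:0 == 0x3); amd76x_process_error_info() below consumes exactly
+ * these fields.
+ */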
+
+#define AMD76X_DRAM_MODE_STATUS	0x58	/* DRAM Mode and status (32b)
+					 *
+					 * 31:26 clock disable 5 - 0
+					 * 25    SDRAM init
+					 * 24    reserved
+					 * 23    mode register service
+					 * 22:21 suspend to RAM
+					 * 20    burst refresh enable
+					 * 19    refresh disable
+					 * 18    reserved
+					 * 17:16 cycles-per-refresh
+					 * 15:8  reserved
+					 *  7:0  x4 mode enable 7 - 0
+					 */
+
+#define AMD76X_MEM_BASE_ADDR	0xC0	/* Memory base address (8 x 32b)
+					 *
+					 * 31:23 chip-select base
+					 * 22:16 reserved
+					 * 15:7  chip-select mask
+					 *  6:3  reserved
+					 *  2:1  address mode
+					 *  0    chip-select enable
+					 */
+
+struct amd76x_error_info {
+	u32 ecc_mode_status;
+};
+
+enum amd76x_chips {
+	AMD761 = 0,
+	AMD762
+};
+
+struct amd76x_dev_info {
+	const char *ctl_name;
+};
+
+static const struct amd76x_dev_info amd76x_devs[] = {
+	[AMD761] = {
+		.ctl_name = "AMD761"},
+	[AMD762] = {
+		.ctl_name = "AMD762"},
+};
+
+static struct edac_pci_ctl_info *amd76x_pci;
+
+/**
+ *	amd76x_get_error_info	-	fetch error information
+ *	@mci: Memory controller
+ *	@info: Info to fill in
+ *
+ *	Fetch and store the AMD76x ECC status. Clear pending status
+ *	on the chip so that further errors will be reported.
+ */
+static void amd76x_get_error_info(struct mem_ctl_info *mci,
+				struct amd76x_error_info *info)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(mci->dev);
+	pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
+			&info->ecc_mode_status);
+
+	if (info->ecc_mode_status & BIT(8))
+		pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
+				 (u32) BIT(8), (u32) BIT(8));
+
+	if (info->ecc_mode_status & BIT(9))
+		pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
+				 (u32) BIT(9), (u32) BIT(9));
+}
+
+/**
+ *	amd76x_process_error_info	-	Error check
+ *	@mci: Memory controller
+ *	@info: Previously fetched information from chip
+ *	@handle_errors: 1 if we should do recovery
+ *
+ *	Process the chip state and decide if an error has occurred.
+ *	A return of 1 indicates an error. If handle_errors is true,
+ *	then attempt to handle and clean up after the error.
+ */
+static int amd76x_process_error_info(struct mem_ctl_info *mci,
+				struct amd76x_error_info *info,
+				int handle_errors)
+{
+	int error_found;
+	u32 row;
+
+	error_found = 0;
+
+	/*
+	 *      Check for an uncorrectable error
+	 */
+	if (info->ecc_mode_status & BIT(8)) {
+		error_found = 1;
+
+		if (handle_errors) {
+			row = (info->ecc_mode_status >> 4) & 0xf;
+			edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
+					row, mci->ctl_name);
+		}
+	}
+
+	/*
+	 *      Check for a correctable error
+	 */
+	if (info->ecc_mode_status & BIT(9)) {
+		error_found = 1;
+
+		if (handle_errors) {
+			row = info->ecc_mode_status & 0xf;
+			edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
+					0, row, 0, mci->ctl_name);
+		}
+	}
+
+	return error_found;
+}
+
+/**
+ *	amd76x_check	-	Poll the controller
+ *	@mci: Memory controller
+ *
+ *	Called by the poll handlers, this function reads the status
+ *	from the controller and checks for errors.
+ */
+static void amd76x_check(struct mem_ctl_info *mci)
+{
+	struct amd76x_error_info info;
+	debugf3("%s()\n", __func__);
+	amd76x_get_error_info(mci, &info);
+	amd76x_process_error_info(mci, &info, 1);
+}
+
+static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
+			enum edac_type edac_mode)
+{
+	struct csrow_info *csrow;
+	u32 mba, mba_base, mba_mask, dms;
+	int index;
+
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+
+		/* find the DRAM Chip Select Base address and mask */
+		pci_read_config_dword(pdev,
+				AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
+
+		if (!(mba & BIT(0)))
+			continue;
+
+		mba_base = mba & 0xff800000UL;
+		mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
+		pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
+		csrow->first_page = mba_base >> PAGE_SHIFT;
+		csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+		csrow->page_mask = mba_mask >> PAGE_SHIFT;
+		csrow->grain = csrow->nr_pages << PAGE_SHIFT;
+		csrow->mtype = MEM_RDDR;
+		csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+		csrow->edac_mode = edac_mode;
+	}
+}
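+
+/*
+ * Worked example (illustration only): a hypothetical memory base address
+ * register value of 0x00800081 has bit 0 (chip-select enable) set and
+ * decodes to mba_base == 0x00800000 and mba_mask == 0x00ffffff, i.e. a
+ * 16 MiB csrow based at 16 MiB (nr_pages == 0x1000 with 4 KiB pages).
+ */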
+
+/**
+ *	amd76x_probe1	-	Perform set up for detected device
+ *	@pdev: PCI device detected
+ *	@dev_idx: Device type index
+ *
+ *	We have found an AMD76x and now need to set up the memory
+ *	controller status reporting. We configure and set up the
+ *	memory controller reporting and claim the device.
+ */
+static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	static const enum edac_type ems_modes[] = {
+		EDAC_NONE,
+		EDAC_EC,
+		EDAC_SECDED,
+		EDAC_SECDED
+	};
+	struct mem_ctl_info *mci = NULL;
+	u32 ems;
+	u32 ems_mode;
+	struct amd76x_error_info discard;
+
+	debugf0("%s()\n", __func__);
+	pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
+	ems_mode = (ems >> 10) & 0x3;
+	mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
+
+	if (mci == NULL) {
+		return -ENOMEM;
+	}
+
+	debugf0("%s(): mci = %p\n", __func__, mci);
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_RDDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+	mci->edac_cap = ems_mode ?
+		(EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = AMD76X_REVISION;
+	mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = amd76x_check;
+	mci->ctl_page_to_phys = NULL;
+
+	amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]);
+	amd76x_get_error_info(mci, &discard);	/* clear counters */
+
+	/* Here we assume that we will never see multiple instances of this
+	 * type of memory controller.  The ID is therefore hardcoded to 0.
+	 */
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail;
+	}
+
+	/* allocating generic PCI control info */
+	amd76x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!amd76x_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+	return 0;
+
+fail:
+	edac_mc_free(mci);
+	return -ENODEV;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit amd76x_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	debugf0("%s()\n", __func__);
+
+	/* no need to call pci_enable_device() here */
+	return amd76x_probe1(pdev, ent->driver_data);
+}
+
+/**
+ *	amd76x_remove_one	-	driver shutdown
+ *	@pdev: PCI device being handed back
+ *
+ *	Called when the driver is unloaded. Find the matching mci
+ *	structure for the device then delete the mci and free the
+ *	resources.
+ */
+static void __devexit amd76x_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0("%s()\n", __func__);
+
+	if (amd76x_pci)
+		edac_pci_release_generic_ctl(amd76x_pci);
+
+	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+		return;
+
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
+	{
+	 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 AMD762},
+	{
+	 PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 AMD761},
+	{
+	 0,
+	 }			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
+
+static struct pci_driver amd76x_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = amd76x_init_one,
+	.remove = __devexit_p(amd76x_remove_one),
+	.id_table = amd76x_pci_tbl,
+};
+
+static int __init amd76x_init(void)
+{
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	return pci_register_driver(&amd76x_driver);
+}
+
+static void __exit amd76x_exit(void)
+{
+	pci_unregister_driver(&amd76x_driver);
+}
+
+module_init(amd76x_init);
+module_exit(amd76x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
new file mode 100644
index 0000000..35b78d0
--- /dev/null
+++ b/drivers/edac/amd8111_edac.c
@@ -0,0 +1,594 @@
+/*
+ * amd8111_edac.c, AMD8111 HyperTransport chip EDAC kernel module
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
+ * 		Benjamin Walsh <benjamin.walsh@windriver.com>
+ * 		Hu Yongqi <yongqi.hu@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/edac.h>
+#include <linux/pci_ids.h>
+#include <asm/io.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+#include "amd8111_edac.h"
+
+#define AMD8111_EDAC_REVISION	" Ver: 1.0.0 " __DATE__
+#define AMD8111_EDAC_MOD_STR	"amd8111_edac"
+
+#define PCI_DEVICE_ID_AMD_8111_PCI	0x7460
+
+enum amd8111_edac_devs {
+	LPC_BRIDGE = 0,
+};
+
+enum amd8111_edac_pcis {
+	PCI_BRIDGE = 0,
+};
+
+/* Wrapper functions for accessing PCI configuration space */
+static int edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
+{
+	int ret;
+
+	ret = pci_read_config_dword(dev, reg, val32);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Read Error at 0x%x\n", reg);
+
+	return ret;
+}
+
+static void edac_pci_read_byte(struct pci_dev *dev, int reg, u8 *val8)
+{
+	int ret;
+
+	ret = pci_read_config_byte(dev, reg, val8);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Read Error at 0x%x\n", reg);
+}
+
+static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
+{
+	int ret;
+
+	ret = pci_write_config_dword(dev, reg, val32);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Write Error at 0x%x\n", reg);
+}
+
+static void edac_pci_write_byte(struct pci_dev *dev, int reg, u8 val8)
+{
+	int ret;
+
+	ret = pci_write_config_byte(dev, reg, val8);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Write Error at 0x%x\n", reg);
+}
+
+/*
+ * device-specific methods for the amd8111 PCI Bridge Controller
+ *
+ * Error reporting and handling for the amd8111 chipset is described in
+ * section 3.1.2 of its datasheet, p. 37
+ */
+static void amd8111_pci_bridge_init(struct amd8111_pci_info *pci_info)
+{
+	u32 val32;
+	struct pci_dev *dev = pci_info->dev;
+
+	/* First clear error detection flags on the host interface */
+
+	/* Clear SSE/RMA/RTA flags in the global status register */
+	edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+	if (val32 & PCI_STSCMD_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+	/* Clear CRC and Link Fail flags in HT Link Control reg */
+	edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+	if (val32 & HT_LINK_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+	/* Second clear all fault on the secondary interface */
+
+	/* Clear error flags in the memory-base limit reg. */
+	edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
+	if (val32 & MEM_LIMIT_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_MEM_LIM, val32);
+
+	/* Clear Discard Timer Expired flag in Interrupt/Bridge Control reg */
+	edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+	if (val32 & PCI_INTBRG_CTRL_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+
+	/* Last enable error detections */
+	if (edac_op_state == EDAC_OPSTATE_POLL) {
+		/* Enable System Error reporting in global status register */
+		edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+		val32 |= PCI_STSCMD_SERREN;
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+		/* Enable CRC Sync flood packets to HyperTransport Link */
+		edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+		val32 |= HT_LINK_CRCFEN;
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+		/* Enable SSE reporting etc in Interrupt control reg */
+		edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+		val32 |= PCI_INTBRG_CTRL_POLL_MASK;
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+	}
+}
+
+static void amd8111_pci_bridge_exit(struct amd8111_pci_info *pci_info)
+{
+	u32 val32;
+	struct pci_dev *dev = pci_info->dev;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL) {
+		/* Disable System Error reporting */
+		edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+		val32 &= ~PCI_STSCMD_SERREN;
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+		/* Disable CRC flood packets */
+		edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+		val32 &= ~HT_LINK_CRCFEN;
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+		/* Disable DTSERREN/MARSP/SERREN in Interrupt Control reg */
+		edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+		val32 &= ~PCI_INTBRG_CTRL_POLL_MASK;
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+	}
+}
+
+static void amd8111_pci_bridge_check(struct edac_pci_ctl_info *edac_dev)
+{
+	struct amd8111_pci_info *pci_info = edac_dev->pvt_info;
+	struct pci_dev *dev = pci_info->dev;
+	u32 val32;
+
+	/* Check out PCI Bridge Status and Command Register */
+	edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+	if (val32 & PCI_STSCMD_CLEAR_MASK) {
+		printk(KERN_INFO "Error(s) in PCI bridge status and command"
+			"register on device %s\n", pci_info->ctl_name);
+		printk(KERN_INFO "SSE: %d, RMA: %d, RTA: %d\n",
+			(val32 & PCI_STSCMD_SSE) != 0,
+			(val32 & PCI_STSCMD_RMA) != 0,
+			(val32 & PCI_STSCMD_RTA) != 0);
+
+		val32 |= PCI_STSCMD_CLEAR_MASK;
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check out HyperTransport Link Control Register */
+	edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+	if (val32 & HT_LINK_LKFAIL) {
+		printk(KERN_INFO "Error(s) in hypertransport link control"
+			"register on device %s\n", pci_info->ctl_name);
+		printk(KERN_INFO "LKFAIL: %d\n",
+			(val32 & HT_LINK_LKFAIL) != 0);
+
+		val32 |= HT_LINK_LKFAIL;
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check out PCI Interrupt and Bridge Control Register */
+	edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+	if (val32 & PCI_INTBRG_CTRL_DTSTAT) {
+		printk(KERN_INFO "Error(s) in PCI interrupt and bridge control"
+			"register on device %s\n", pci_info->ctl_name);
+		printk(KERN_INFO "DTSTAT: %d\n",
+			(val32 & PCI_INTBRG_CTRL_DTSTAT) != 0);
+
+		val32 |= PCI_INTBRG_CTRL_DTSTAT;
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check out PCI Bridge Memory Base-Limit Register */
+	edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
+	if (val32 & MEM_LIMIT_CLEAR_MASK) {
+		printk(KERN_INFO
+			"Error(s) in mem limit register on %s device\n",
+			pci_info->ctl_name);
+		printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
+			"RTA: %d, STA: %d, MDPE: %d\n",
+			(val32 & MEM_LIMIT_DPE)  != 0,
+			(val32 & MEM_LIMIT_RSE)  != 0,
+			(val32 & MEM_LIMIT_RMA)  != 0,
+			(val32 & MEM_LIMIT_RTA)  != 0,
+			(val32 & MEM_LIMIT_STA)  != 0,
+			(val32 & MEM_LIMIT_MDPE) != 0);
+
+		val32 |= MEM_LIMIT_CLEAR_MASK;
+		edac_pci_write_dword(dev, REG_MEM_LIM, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+}
+
+static struct resource *legacy_io_res;
+static int at_compat_reg_broken;
+#define LEGACY_NR_PORTS	1
+
+/* device-specific methods for amd8111 LPC Bridge device */
+static void amd8111_lpc_bridge_init(struct amd8111_dev_info *dev_info)
+{
+	u8 val8;
+	struct pci_dev *dev = dev_info->dev;
+
+	/* First clear REG_AT_COMPAT[SERR, IOCHK] if necessary */
+	legacy_io_res = request_region(REG_AT_COMPAT, LEGACY_NR_PORTS,
+					AMD8111_EDAC_MOD_STR);
+	if (!legacy_io_res)
+		printk(KERN_INFO "%s: failed to request legacy I/O region "
+			"start %d, len %d\n", __func__,
+			REG_AT_COMPAT, LEGACY_NR_PORTS);
+	else {
+		val8 = __do_inb(REG_AT_COMPAT);
+		if (val8 == 0xff) { /* buggy port */
+			printk(KERN_INFO "%s: port %d is buggy, not supported"
+				" by hardware?\n", __func__, REG_AT_COMPAT);
+			at_compat_reg_broken = 1;
+			release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
+			legacy_io_res = NULL;
+		} else {
+			u8 out8 = 0;
+			if (val8 & AT_COMPAT_SERR)
+				out8 = AT_COMPAT_CLRSERR;
+			if (val8 & AT_COMPAT_IOCHK)
+				out8 |= AT_COMPAT_CLRIOCHK;
+			if (out8 > 0)
+				__do_outb(out8, REG_AT_COMPAT);
+		}
+	}
+
+	/* Second clear error flags on LPC bridge */
+	edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
+	if (val8 & IO_CTRL_1_CLEAR_MASK)
+		edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
+}
+
+static void amd8111_lpc_bridge_exit(struct amd8111_dev_info *dev_info)
+{
+	if (legacy_io_res)
+		release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
+}
+
+static void amd8111_lpc_bridge_check(struct edac_device_ctl_info *edac_dev)
+{
+	struct amd8111_dev_info *dev_info = edac_dev->pvt_info;
+	struct pci_dev *dev = dev_info->dev;
+	u8 val8;
+
+	edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
+	if (val8 & IO_CTRL_1_CLEAR_MASK) {
+		printk(KERN_INFO
+			"Error(s) in IO control register on %s device\n",
+			dev_info->ctl_name);
+		printk(KERN_INFO "LPC ERR: %d, PW2LPC: %d\n",
+			(val8 & IO_CTRL_1_LPC_ERR) != 0,
+			(val8 & IO_CTRL_1_PW2LPC) != 0);
+
+		val8 |= IO_CTRL_1_CLEAR_MASK;
+		edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
+
+		edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+	}
+
+	if (at_compat_reg_broken == 0) {
+		u8 out8 = 0;
+		val8 = __do_inb(REG_AT_COMPAT);
+		if (val8 & AT_COMPAT_SERR)
+			out8 = AT_COMPAT_CLRSERR;
+		if (val8 & AT_COMPAT_IOCHK)
+			out8 |= AT_COMPAT_CLRIOCHK;
+		if (out8 > 0) {
+			__do_outb(out8, REG_AT_COMPAT);
+			edac_device_handle_ue(edac_dev, 0, 0,
+						edac_dev->ctl_name);
+		}
+	}
+}
+
+/* General devices represented by edac_device_ctl_info */
+static struct amd8111_dev_info amd8111_devices[] = {
+	[LPC_BRIDGE] = {
+		.err_dev = PCI_DEVICE_ID_AMD_8111_LPC,
+		.ctl_name = "lpc",
+		.init = amd8111_lpc_bridge_init,
+		.exit = amd8111_lpc_bridge_exit,
+		.check = amd8111_lpc_bridge_check,
+	},
+	{0},
+};
+
+/* PCI controllers represented by edac_pci_ctl_info */
+static struct amd8111_pci_info amd8111_pcis[] = {
+	[PCI_BRIDGE] = {
+		.err_dev = PCI_DEVICE_ID_AMD_8111_PCI,
+		.ctl_name = "AMD8111_PCI_Controller",
+		.init = amd8111_pci_bridge_init,
+		.exit = amd8111_pci_bridge_exit,
+		.check = amd8111_pci_bridge_check,
+	},
+	{0},
+};
+
+static int amd8111_dev_probe(struct pci_dev *dev,
+				const struct pci_device_id *id)
+{
+	struct amd8111_dev_info *dev_info = &amd8111_devices[id->driver_data];
+
+	dev_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
+					dev_info->err_dev, NULL);
+
+	if (!dev_info->dev) {
+		printk(KERN_ERR "EDAC device not found:"
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, dev_info->err_dev,
+			dev_info->ctl_name);
+		return -ENODEV;
+	}
+
+	if (pci_enable_device(dev_info->dev)) {
+		pci_dev_put(dev_info->dev);
+		printk(KERN_ERR "failed to enable:"
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, dev_info->err_dev,
+			dev_info->ctl_name);
+		return -ENODEV;
+	}
+
+	/*
+	 * We do not allocate an extra private structure for
+	 * edac_device_ctl_info, but make use of the existing
+	 * one instead.
+	 */
+	dev_info->edac_idx = edac_device_alloc_index();
+	dev_info->edac_dev =
+		edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
+					   NULL, 0, 0,
+					   NULL, 0, dev_info->edac_idx);
+	if (!dev_info->edac_dev)
+		return -ENOMEM;
+
+	dev_info->edac_dev->pvt_info = dev_info;
+	dev_info->edac_dev->dev = &dev_info->dev->dev;
+	dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
+	dev_info->edac_dev->ctl_name = dev_info->ctl_name;
+	dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		dev_info->edac_dev->edac_check = dev_info->check;
+
+	if (dev_info->init)
+		dev_info->init(dev_info);
+
+	if (edac_device_add_device(dev_info->edac_dev) > 0) {
+		printk(KERN_ERR "failed to add edac_dev for %s\n",
+			dev_info->ctl_name);
+		edac_device_free_ctl_info(dev_info->edac_dev);
+		return -ENODEV;
+	}
+
+	printk(KERN_INFO "added one edac_dev on AMD8111 "
+		"vendor %x, device %x, name %s\n",
+		PCI_VENDOR_ID_AMD, dev_info->err_dev,
+		dev_info->ctl_name);
+
+	return 0;
+}
+
+static void amd8111_dev_remove(struct pci_dev *dev)
+{
+	struct amd8111_dev_info *dev_info;
+
+	for (dev_info = amd8111_devices; dev_info->err_dev; dev_info++)
+		if (dev_info->dev->device == dev->device)
+			break;
+
+	if (!dev_info->err_dev)	/* should never happen */
+		return;
+
+	if (dev_info->edac_dev) {
+		edac_device_del_device(dev_info->edac_dev->dev);
+		edac_device_free_ctl_info(dev_info->edac_dev);
+	}
+
+	if (dev_info->exit)
+		dev_info->exit(dev_info);
+
+	pci_dev_put(dev_info->dev);
+}
+
+static int amd8111_pci_probe(struct pci_dev *dev,
+				const struct pci_device_id *id)
+{
+	struct amd8111_pci_info *pci_info = &amd8111_pcis[id->driver_data];
+
+	pci_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
+					pci_info->err_dev, NULL);
+
+	if (!pci_info->dev) {
+		printk(KERN_ERR "EDAC device not found:"
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, pci_info->err_dev,
+			pci_info->ctl_name);
+		return -ENODEV;
+	}
+
+	if (pci_enable_device(pci_info->dev)) {
+		pci_dev_put(pci_info->dev);
+		printk(KERN_ERR "failed to enable:"
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, pci_info->err_dev,
+			pci_info->ctl_name);
+		return -ENODEV;
+	}
+
+	/*
+	 * We do not allocate an extra private structure for
+	 * edac_pci_ctl_info, but make use of the existing
+	 * one instead.
+	 */
+	pci_info->edac_idx = edac_pci_alloc_index();
+	pci_info->edac_dev = edac_pci_alloc_ctl_info(0, pci_info->ctl_name);
+	if (!pci_info->edac_dev)
+		return -ENOMEM;
+
+	pci_info->edac_dev->pvt_info = pci_info;
+	pci_info->edac_dev->dev = &pci_info->dev->dev;
+	pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
+	pci_info->edac_dev->ctl_name = pci_info->ctl_name;
+	pci_info->edac_dev->dev_name = dev_name(&pci_info->dev->dev);
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		pci_info->edac_dev->edac_check = pci_info->check;
+
+	if (pci_info->init)
+		pci_info->init(pci_info);
+
+	if (edac_pci_add_device(pci_info->edac_dev, pci_info->edac_idx) > 0) {
+		printk(KERN_ERR "failed to add edac_pci for %s\n",
+			pci_info->ctl_name);
+		edac_pci_free_ctl_info(pci_info->edac_dev);
+		return -ENODEV;
+	}
+
+	printk(KERN_INFO "added one edac_pci on AMD8111 "
+		"vendor %x, device %x, name %s\n",
+		PCI_VENDOR_ID_AMD, pci_info->err_dev,
+		pci_info->ctl_name);
+
+	return 0;
+}
+
+static void amd8111_pci_remove(struct pci_dev *dev)
+{
+	struct amd8111_pci_info *pci_info;
+
+	for (pci_info = amd8111_pcis; pci_info->err_dev; pci_info++)
+		if (pci_info->dev->device == dev->device)
+			break;
+
+	if (!pci_info->err_dev)	/* should never happen */
+		return;
+
+	if (pci_info->edac_dev) {
+		edac_pci_del_device(pci_info->edac_dev->dev);
+		edac_pci_free_ctl_info(pci_info->edac_dev);
+	}
+
+	if (pci_info->exit)
+		pci_info->exit(pci_info);
+
+	pci_dev_put(pci_info->dev);
+}
+
+/* PCI Device ID table for general EDAC device */
+static const struct pci_device_id amd8111_edac_dev_tbl[] = {
+	{
+	PCI_VEND_DEV(AMD, 8111_LPC),
+	.subvendor = PCI_ANY_ID,
+	.subdevice = PCI_ANY_ID,
+	.class = 0,
+	.class_mask = 0,
+	.driver_data = LPC_BRIDGE,
+	},
+	{
+	0,
+	}			/* table is NULL-terminated */
+};
+MODULE_DEVICE_TABLE(pci, amd8111_edac_dev_tbl);
+
+static struct pci_driver amd8111_edac_dev_driver = {
+	.name = "AMD8111_EDAC_DEV",
+	.probe = amd8111_dev_probe,
+	.remove = amd8111_dev_remove,
+	.id_table = amd8111_edac_dev_tbl,
+};
+
+/* PCI Device ID table for EDAC PCI controller */
+static const struct pci_device_id amd8111_edac_pci_tbl[] = {
+	{
+	PCI_VEND_DEV(AMD, 8111_PCI),
+	.subvendor = PCI_ANY_ID,
+	.subdevice = PCI_ANY_ID,
+	.class = 0,
+	.class_mask = 0,
+	.driver_data = PCI_BRIDGE,
+	},
+	{
+	0,
+	}			/* table is NULL-terminated */
+};
+MODULE_DEVICE_TABLE(pci, amd8111_edac_pci_tbl);
+
+static struct pci_driver amd8111_edac_pci_driver = {
+	.name = "AMD8111_EDAC_PCI",
+	.probe = amd8111_pci_probe,
+	.remove = amd8111_pci_remove,
+	.id_table = amd8111_edac_pci_tbl,
+};
+
+static int __init amd8111_edac_init(void)
+{
+	int val;
+
+	printk(KERN_INFO "AMD8111 EDAC driver "	AMD8111_EDAC_REVISION "\n");
+	printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
+
+	/* Only POLL mode supported so far */
+	edac_op_state = EDAC_OPSTATE_POLL;
+
+	val = pci_register_driver(&amd8111_edac_dev_driver);
+	val |= pci_register_driver(&amd8111_edac_pci_driver);
+
+	return val;
+}
+
+static void __exit amd8111_edac_exit(void)
+{
+	pci_unregister_driver(&amd8111_edac_pci_driver);
+	pci_unregister_driver(&amd8111_edac_dev_driver);
+}
+
+module_init(amd8111_edac_init);
+module_exit(amd8111_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
+MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module");
diff --git a/drivers/edac/amd8111_edac.h b/drivers/edac/amd8111_edac.h
new file mode 100644
index 0000000..3579433
--- /dev/null
+++ b/drivers/edac/amd8111_edac.h
@@ -0,0 +1,130 @@
+/*
+ * amd8111_edac.h, EDAC defs for AMD8111 hypertransport chip
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
+ * 		Benjamin Walsh <benjamin.walsh@windriver.com>
+ * 		Hu Yongqi <yongqi.hu@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AMD8111_EDAC_H_
+#define _AMD8111_EDAC_H_
+
+/************************************************************
+ *	PCI Bridge Status and Command Register, DevA:0x04
+ ************************************************************/
+#define REG_PCI_STSCMD	0x04
+enum pci_stscmd_bits {
+	PCI_STSCMD_SSE		= BIT(30),
+	PCI_STSCMD_RMA		= BIT(29),
+	PCI_STSCMD_RTA		= BIT(28),
+	PCI_STSCMD_SERREN	= BIT(8),
+	PCI_STSCMD_CLEAR_MASK	= (PCI_STSCMD_SSE |
+				   PCI_STSCMD_RMA |
+				   PCI_STSCMD_RTA)
+};
+
+/************************************************************
+ *	PCI Bridge Memory Base-Limit Register, DevA:0x1c
+ ************************************************************/
+#define REG_MEM_LIM     0x1c
+enum mem_limit_bits {
+	MEM_LIMIT_DPE   = BIT(31),
+	MEM_LIMIT_RSE   = BIT(30),
+	MEM_LIMIT_RMA   = BIT(29),
+	MEM_LIMIT_RTA   = BIT(28),
+	MEM_LIMIT_STA   = BIT(27),
+	MEM_LIMIT_MDPE  = BIT(24),
+	MEM_LIMIT_CLEAR_MASK  = (MEM_LIMIT_DPE |
+				 MEM_LIMIT_RSE |
+				 MEM_LIMIT_RMA |
+				 MEM_LIMIT_RTA |
+				 MEM_LIMIT_STA |
+				 MEM_LIMIT_MDPE)
+};
+
+/************************************************************
+ *	HyperTransport Link Control Register, DevA:0xc4
+ ************************************************************/
+#define REG_HT_LINK	0xc4
+enum ht_link_bits {
+	HT_LINK_LKFAIL	= BIT(4),
+	HT_LINK_CRCFEN	= BIT(1),
+	HT_LINK_CLEAR_MASK = (HT_LINK_LKFAIL)
+};
+
+/************************************************************
+ *	PCI Bridge Interrupt and Bridge Control, DevA:0x3c
+ ************************************************************/
+#define REG_PCI_INTBRG_CTRL	0x3c
+enum pci_intbrg_ctrl_bits {
+	PCI_INTBRG_CTRL_DTSERREN	= BIT(27),
+	PCI_INTBRG_CTRL_DTSTAT		= BIT(26),
+	PCI_INTBRG_CTRL_MARSP		= BIT(21),
+	PCI_INTBRG_CTRL_SERREN		= BIT(17),
+	PCI_INTBRG_CTRL_PEREN		= BIT(16),
+	PCI_INTBRG_CTRL_CLEAR_MASK	= (PCI_INTBRG_CTRL_DTSTAT),
+	PCI_INTBRG_CTRL_POLL_MASK	= (PCI_INTBRG_CTRL_DTSERREN |
+					   PCI_INTBRG_CTRL_MARSP |
+					   PCI_INTBRG_CTRL_SERREN)
+};
+
+/************************************************************
+ *		I/O Control 1 Register, DevB:0x40
+ ************************************************************/
+#define REG_IO_CTRL_1 0x40
+enum io_ctrl_1_bits {
+	IO_CTRL_1_NMIONERR	= BIT(7),
+	IO_CTRL_1_LPC_ERR	= BIT(6),
+	IO_CTRL_1_PW2LPC	= BIT(1),
+	IO_CTRL_1_CLEAR_MASK	= (IO_CTRL_1_LPC_ERR | IO_CTRL_1_PW2LPC)
+};
+
+/************************************************************
+ *		Legacy I/O Space Registers
+ ************************************************************/
+#define REG_AT_COMPAT 0x61
+enum at_compat_bits {
+	AT_COMPAT_SERR		= BIT(7),
+	AT_COMPAT_IOCHK		= BIT(6),
+	AT_COMPAT_CLRIOCHK	= BIT(3),
+	AT_COMPAT_CLRSERR	= BIT(2),
+};
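+
+/*
+ * Usage note: these are acknowledge-by-write controls on legacy port 0x61.
+ * amd8111_lpc_bridge_check() in amd8111_edac.c reads the port and, if
+ * AT_COMPAT_SERR or AT_COMPAT_IOCHK is latched, writes AT_COMPAT_CLRSERR/
+ * AT_COMPAT_CLRIOCHK back to clear the condition.
+ */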
+
+struct amd8111_dev_info {
+	u16 err_dev;	/* PCI Device ID */
+	struct pci_dev *dev;
+	int edac_idx;	/* device index */
+	char *ctl_name;
+	struct edac_device_ctl_info *edac_dev;
+	void (*init)(struct amd8111_dev_info *dev_info);
+	void (*exit)(struct amd8111_dev_info *dev_info);
+	void (*check)(struct edac_device_ctl_info *edac_dev);
+};
+
+struct amd8111_pci_info {
+	u16 err_dev;	/* PCI Device ID */
+	struct pci_dev *dev;
+	int edac_idx;	/* pci index */
+	const char *ctl_name;
+	struct edac_pci_ctl_info *edac_dev;
+	void (*init)(struct amd8111_pci_info *dev_info);
+	void (*exit)(struct amd8111_pci_info *dev_info);
+	void (*check)(struct edac_pci_ctl_info *edac_dev);
+};
+
+#endif /* _AMD8111_EDAC_H_ */
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
new file mode 100644
index 0000000..b432d60
--- /dev/null
+++ b/drivers/edac/amd8131_edac.c
@@ -0,0 +1,379 @@
+/*
+ * amd8131_edac.c, AMD8131 hypertransport chip EDAC kernel module
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
+ * 		Benjamin Walsh <benjamin.walsh@windriver.com>
+ * 		Hu Yongqi <yongqi.hu@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/edac.h>
+#include <linux/pci_ids.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+#include "amd8131_edac.h"
+
+#define AMD8131_EDAC_REVISION	" Ver: 1.0.0 " __DATE__
+#define AMD8131_EDAC_MOD_STR	"amd8131_edac"
+
+/* Wrapper functions for accessing PCI configuration space */
+static void edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
+{
+	int ret;
+
+	ret = pci_read_config_dword(dev, reg, val32);
+	if (ret != 0)
+		printk(KERN_ERR AMD8131_EDAC_MOD_STR
+			" PCI Access Read Error at 0x%x\n", reg);
+}
+
+static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
+{
+	int ret;
+
+	ret = pci_write_config_dword(dev, reg, val32);
+	if (ret != 0)
+		printk(KERN_ERR AMD8131_EDAC_MOD_STR
+			" PCI Access Write Error at 0x%x\n", reg);
+}
+
+static char * const bridge_str[] = {
+	[NORTH_A] = "NORTH A",
+	[NORTH_B] = "NORTH B",
+	[SOUTH_A] = "SOUTH A",
+	[SOUTH_B] = "SOUTH B",
+	[NO_BRIDGE] = "NO BRIDGE",
+};
+
+/* Support up to two AMD8131 chipsets on a platform */
+static struct amd8131_dev_info amd8131_devices[] = {
+	{
+	.inst = NORTH_A,
+	.devfn = DEVFN_PCIX_BRIDGE_NORTH_A,
+	.ctl_name = "AMD8131_PCIX_NORTH_A",
+	},
+	{
+	.inst = NORTH_B,
+	.devfn = DEVFN_PCIX_BRIDGE_NORTH_B,
+	.ctl_name = "AMD8131_PCIX_NORTH_B",
+	},
+	{
+	.inst = SOUTH_A,
+	.devfn = DEVFN_PCIX_BRIDGE_SOUTH_A,
+	.ctl_name = "AMD8131_PCIX_SOUTH_A",
+	},
+	{
+	.inst = SOUTH_B,
+	.devfn = DEVFN_PCIX_BRIDGE_SOUTH_B,
+	.ctl_name = "AMD8131_PCIX_SOUTH_B",
+	},
+	{.inst = NO_BRIDGE,},
+};
+
+static void amd8131_pcix_init(struct amd8131_dev_info *dev_info)
+{
+	u32 val32;
+	struct pci_dev *dev = dev_info->dev;
+
+	/* First clear error detection flags */
+	edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
+	if (val32 & MEM_LIMIT_MASK)
+		edac_pci_write_dword(dev, REG_MEM_LIM, val32);
+
+	/* Clear Discard Timer Timedout flag */
+	edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
+	if (val32 & INT_CTLR_DTS)
+		edac_pci_write_dword(dev, REG_INT_CTLR, val32);
+
+	/* Clear CRC Error flag on link side A */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
+	if (val32 & LNK_CTRL_CRCERR_A)
+		edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
+
+	/* Clear CRC Error flag on link side B */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
+	if (val32 & LNK_CTRL_CRCERR_B)
+		edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
+
+	/*
+	 * Then enable all error detections.
+	 *
+	 * Setup Discard Timer Sync Flood Enable,
+	 * System Error Enable and Parity Error Enable.
+	 */
+	edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
+	val32 |= INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE;
+	edac_pci_write_dword(dev, REG_INT_CTLR, val32);
+
+	/* Enable overall SERR Error detection */
+	edac_pci_read_dword(dev, REG_STS_CMD, &val32);
+	val32 |= STS_CMD_SERREN;
+	edac_pci_write_dword(dev, REG_STS_CMD, val32);
+
+	/* Setup CRC Flood Enable for link side A */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
+	val32 |= LNK_CTRL_CRCFEN;
+	edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
+
+	/* Setup CRC Flood Enable for link side B */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
+	val32 |= LNK_CTRL_CRCFEN;
+	edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
+}
+
+static void amd8131_pcix_exit(struct amd8131_dev_info *dev_info)
+{
+	u32 val32;
+	struct pci_dev *dev = dev_info->dev;
+
+	/* Disable SERR, PERR and DTSE Error detection */
+	edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
+	val32 &= ~(INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE);
+	edac_pci_write_dword(dev, REG_INT_CTLR, val32);
+
+	/* Disable overall System Error detection */
+	edac_pci_read_dword(dev, REG_STS_CMD, &val32);
+	val32 &= ~STS_CMD_SERREN;
+	edac_pci_write_dword(dev, REG_STS_CMD, val32);
+
+	/* Disable CRC Sync Flood on link side A */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
+	val32 &= ~LNK_CTRL_CRCFEN;
+	edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
+
+	/* Disable CRC Sync Flood on link side B */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
+	val32 &= ~LNK_CTRL_CRCFEN;
+	edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
+}
+
+static void amd8131_pcix_check(struct edac_pci_ctl_info *edac_dev)
+{
+	struct amd8131_dev_info *dev_info = edac_dev->pvt_info;
+	struct pci_dev *dev = dev_info->dev;
+	u32 val32;
+
+	/* Check PCI-X Bridge Memory Base-Limit Register for errors */
+	edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
+	if (val32 & MEM_LIMIT_MASK) {
+		printk(KERN_INFO "Error(s) in mem limit register "
+			"on %s bridge\n", dev_info->ctl_name);
+		printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
+			"RTA: %d, STA: %d, MDPE: %d\n",
+			val32 & MEM_LIMIT_DPE,
+			val32 & MEM_LIMIT_RSE,
+			val32 & MEM_LIMIT_RMA,
+			val32 & MEM_LIMIT_RTA,
+			val32 & MEM_LIMIT_STA,
+			val32 & MEM_LIMIT_MDPE);
+
+		val32 |= MEM_LIMIT_MASK;
+		edac_pci_write_dword(dev, REG_MEM_LIM, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check if Discard Timer timed out */
+	edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
+	if (val32 & INT_CTLR_DTS) {
+		printk(KERN_INFO "Error(s) in interrupt and control register "
+			"on %s bridge\n", dev_info->ctl_name);
+		printk(KERN_INFO "DTS: %d\n", val32 & INT_CTLR_DTS);
+
+		val32 |= INT_CTLR_DTS;
+		edac_pci_write_dword(dev, REG_INT_CTLR, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check if CRC error happens on link side A */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
+	if (val32 & LNK_CTRL_CRCERR_A) {
+		printk(KERN_INFO "Error(s) in link conf and control register "
+			"on %s bridge\n", dev_info->ctl_name);
+		printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_A);
+
+		val32 |= LNK_CTRL_CRCERR_A;
+		edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check if CRC error happens on link side B */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
+	if (val32 & LNK_CTRL_CRCERR_B) {
+		printk(KERN_INFO "Error(s) in link conf and control register "
+			"on %s bridge\n", dev_info->ctl_name);
+		printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_B);
+
+		val32 |= LNK_CTRL_CRCERR_B;
+		edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+}
+
+static struct amd8131_info amd8131_chipset = {
+	.err_dev = PCI_DEVICE_ID_AMD_8131_APIC,
+	.devices = amd8131_devices,
+	.init = amd8131_pcix_init,
+	.exit = amd8131_pcix_exit,
+	.check = amd8131_pcix_check,
+};
+
+/*
+ * There are 4 PCIX Bridges on ATCA-6101 that share the same PCI Device ID,
+ * so the kernel calls amd8131_probe() 4 times, each time with a
+ * different pci_dev.
+ */
+static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	struct amd8131_dev_info *dev_info;
+
+	for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
+		dev_info++)
+		if (dev_info->devfn == dev->devfn)
+			break;
+
+	if (dev_info->inst == NO_BRIDGE) /* should never happen */
+		return -ENODEV;
+
+	/*
+	 * We can't use pci_get_device() as we usually do, because all
+	 * 4 bridges share the same Device ID; take a reference on the
+	 * pci_dev we were probed with via pci_dev_get() instead.
+	 */
+	dev_info->dev = pci_dev_get(dev);
+
+	if (pci_enable_device(dev_info->dev)) {
+		pci_dev_put(dev_info->dev);
+		printk(KERN_ERR "failed to enable:"
+			"vendor %x, device %x, devfn %x, name %s\n",
+			PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
+			dev_info->devfn, dev_info->ctl_name);
+		return -ENODEV;
+	}
+
+	/*
+	 * We do not allocate an extra private structure for the
+	 * edac_pci_ctl_info, but make use of the existing
+	 * amd8131_devices[] entry instead.
+	 */
+	dev_info->edac_idx = edac_pci_alloc_index();
+	dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name);
+	if (!dev_info->edac_dev)
+		return -ENOMEM;
+
+	dev_info->edac_dev->pvt_info = dev_info;
+	dev_info->edac_dev->dev = &dev_info->dev->dev;
+	dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
+	dev_info->edac_dev->ctl_name = dev_info->ctl_name;
+	dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		dev_info->edac_dev->edac_check = amd8131_chipset.check;
+
+	if (amd8131_chipset.init)
+		amd8131_chipset.init(dev_info);
+
+	if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) {
+		printk(KERN_ERR "failed edac_pci_add_device() for %s\n",
+			dev_info->ctl_name);
+		edac_pci_free_ctl_info(dev_info->edac_dev);
+		return -ENODEV;
+	}
+
+	printk(KERN_INFO "added one device on AMD8131 "
+		"vendor %x, device %x, devfn %x, name %s\n",
+		PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
+		dev_info->devfn, dev_info->ctl_name);
+
+	return 0;
+}
+
+static void amd8131_remove(struct pci_dev *dev)
+{
+	struct amd8131_dev_info *dev_info;
+
+	for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
+		dev_info++)
+		if (dev_info->devfn == dev->devfn)
+			break;
+
+	if (dev_info->inst == NO_BRIDGE) /* should never happen */
+		return;
+
+	if (dev_info->edac_dev) {
+		edac_pci_del_device(dev_info->edac_dev->dev);
+		edac_pci_free_ctl_info(dev_info->edac_dev);
+	}
+
+	if (amd8131_chipset.exit)
+		amd8131_chipset.exit(dev_info);
+
+	pci_dev_put(dev_info->dev);
+}
+
+static const struct pci_device_id amd8131_edac_pci_tbl[] = {
+	{
+	PCI_VEND_DEV(AMD, 8131_BRIDGE),
+	.subvendor = PCI_ANY_ID,
+	.subdevice = PCI_ANY_ID,
+	.class = 0,
+	.class_mask = 0,
+	.driver_data = 0,
+	},
+	{
+	0,
+	}			/* table is NULL-terminated */
+};
+MODULE_DEVICE_TABLE(pci, amd8131_edac_pci_tbl);
+
+static struct pci_driver amd8131_edac_driver = {
+	.name = AMD8131_EDAC_MOD_STR,
+	.probe = amd8131_probe,
+	.remove = amd8131_remove,
+	.id_table = amd8131_edac_pci_tbl,
+};
+
+static int __init amd8131_edac_init(void)
+{
+	printk(KERN_INFO "AMD8131 EDAC driver " AMD8131_EDAC_REVISION "\n");
+	printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
+
+	/* Only POLL mode supported so far */
+	edac_op_state = EDAC_OPSTATE_POLL;
+
+	return pci_register_driver(&amd8131_edac_driver);
+}
+
+static void __exit amd8131_edac_exit(void)
+{
+	pci_unregister_driver(&amd8131_edac_driver);
+}
+
+module_init(amd8131_edac_init);
+module_exit(amd8131_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
+MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");
diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h
new file mode 100644
index 0000000..60e0d1c
--- /dev/null
+++ b/drivers/edac/amd8131_edac.h
@@ -0,0 +1,119 @@
+/*
+ * amd8131_edac.h, EDAC defs for AMD8131 hypertransport chip
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
+ * 		Benjamin Walsh <benjamin.walsh@windriver.com>
+ * 		Hu Yongqi <yongqi.hu@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AMD8131_EDAC_H_
+#define _AMD8131_EDAC_H_
+
+#define DEVFN_PCIX_BRIDGE_NORTH_A	8
+#define DEVFN_PCIX_BRIDGE_NORTH_B	16
+#define DEVFN_PCIX_BRIDGE_SOUTH_A	24
+#define DEVFN_PCIX_BRIDGE_SOUTH_B	32
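+
+/*
+ * For reference: PCI devfn encodes device and function as
+ * (dev << 3 | fn), so the values above correspond to devices 1
+ * through 4, function 0 (e.g. 8 == PCI_DEVFN(1, 0)).
+ */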
+
+/************************************************************
+ *	PCI-X Bridge Status and Command Register, DevA:0x04
+ ************************************************************/
+#define REG_STS_CMD	0x04
+enum sts_cmd_bits {
+	STS_CMD_SSE	= BIT(30),
+	STS_CMD_SERREN	= BIT(8)
+};
+
+/************************************************************
+ *	PCI-X Bridge Interrupt and Bridge Control Register,
+ ************************************************************/
+#define REG_INT_CTLR	0x3c
+enum int_ctlr_bits {
+	INT_CTLR_DTSE	= BIT(27),
+	INT_CTLR_DTS	= BIT(26),
+	INT_CTLR_SERR	= BIT(17),
+	INT_CTLR_PERR	= BIT(16)
+};
+
+/************************************************************
+ *	PCI-X Bridge Memory Base-Limit Register, DevA:0x1C
+ ************************************************************/
+#define REG_MEM_LIM	0x1c
+enum mem_limit_bits {
+	MEM_LIMIT_DPE 	= BIT(31),
+	MEM_LIMIT_RSE 	= BIT(30),
+	MEM_LIMIT_RMA 	= BIT(29),
+	MEM_LIMIT_RTA 	= BIT(28),
+	MEM_LIMIT_STA	= BIT(27),
+	MEM_LIMIT_MDPE	= BIT(24),
+	MEM_LIMIT_MASK	= MEM_LIMIT_DPE|MEM_LIMIT_RSE|MEM_LIMIT_RMA|
+				MEM_LIMIT_RTA|MEM_LIMIT_STA|MEM_LIMIT_MDPE
+};
+
+/************************************************************
+ *	Link Configuration And Control Register, side A
+ ************************************************************/
+#define REG_LNK_CTRL_A	0xc4
+
+/************************************************************
+ *	Link Configuration And Control Register, side B
+ ************************************************************/
+#define REG_LNK_CTRL_B  0xc8
+
+enum lnk_ctrl_bits {
+	LNK_CTRL_CRCERR_A	= BIT(9),
+	LNK_CTRL_CRCERR_B	= BIT(8),
+	LNK_CTRL_CRCFEN		= BIT(1)
+};
+
+enum pcix_bridge_inst {
+	NORTH_A = 0,
+	NORTH_B = 1,
+	SOUTH_A = 2,
+	SOUTH_B = 3,
+	NO_BRIDGE = 4
+};
+
+struct amd8131_dev_info {
+	int devfn;
+	enum pcix_bridge_inst inst;
+	struct pci_dev *dev;
+	int edac_idx;	/* pci device index */
+	char *ctl_name;
+	struct edac_pci_ctl_info *edac_dev;
+};
+
+/*
+ * The AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC
+ * Controller, and the ATCA-6101 has two AMD8131 chipsets, so there are
+ * four PCIX Bridges on the ATCA-6101 altogether.
+ *
+ * These PCIX Bridges share the same PCI Device ID and are all at
+ * function zero; they can be discriminated by their pci_dev->devfn.
+ * They share the same set of init/check/exit methods, and their
+ * private structures are collected in the devices[] array.
+ */
+struct amd8131_info {
+	u16 err_dev;	/* PCI Device ID for AMD8131 APIC*/
+	struct amd8131_dev_info *devices;
+	void (*init)(struct amd8131_dev_info *dev_info);
+	void (*exit)(struct amd8131_dev_info *dev_info);
+	void (*check)(struct edac_pci_ctl_info *edac_dev);
+};
+
+#endif /* _AMD8131_EDAC_H_ */
+
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
new file mode 100644
index 0000000..c973004
--- /dev/null
+++ b/drivers/edac/cell_edac.c
@@ -0,0 +1,262 @@
+/*
+ * Cell MIC driver for ECC counting
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ *                <benh@kernel.crashing.org>
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ */
+#undef DEBUG
+
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/stop_machine.h>
+#include <linux/io.h>
+#include <asm/machdep.h>
+#include <asm/cell-regs.h>
+
+#include "edac_core.h"
+
+struct cell_edac_priv
+{
+	struct cbe_mic_tm_regs __iomem	*regs;
+	int				node;
+	int				chanmask;
+#ifdef DEBUG
+	u64				prev_fir;
+#endif
+};
+
+static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
+{
+	struct cell_edac_priv		*priv = mci->pvt_info;
+	struct csrow_info		*csrow = &mci->csrows[0];
+	unsigned long			address, pfn, offset, syndrome;
+
+	dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
+		priv->node, chan, ar);
+
+	/* Address decoding is likely a bit bogus, to be double-checked */
+	address = (ar & 0xffffffffe0000000ul) >> 29;
+	if (priv->chanmask == 0x3)
+		address = (address << 1) | chan;
+	pfn = address >> PAGE_SHIFT;
+	offset = address & ~PAGE_MASK;
+	syndrome = (ar & 0x000000001fe00000ul) >> 21;
+
+	/* TODO: Decoding of the error address */
+	edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
+			  syndrome, 0, chan, "");
+}
+
+static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
+{
+	struct cell_edac_priv		*priv = mci->pvt_info;
+	struct csrow_info		*csrow = &mci->csrows[0];
+	unsigned long			address, pfn, offset;
+
+	dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
+		priv->node, chan, ar);
+
+	/* Address decoding is likely a bit bogus, to be double-checked */
+	address = (ar & 0xffffffffe0000000ul) >> 29;
+	if (priv->chanmask == 0x3)
+		address = (address << 1) | chan;
+	pfn = address >> PAGE_SHIFT;
+	offset = address & ~PAGE_MASK;
+
+	/* TODO: Decoding of the error address */
+	edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
+}
+
+static void cell_edac_check(struct mem_ctl_info *mci)
+{
+	struct cell_edac_priv		*priv = mci->pvt_info;
+	u64				fir, addreg, clear = 0;
+
+	fir = in_be64(&priv->regs->mic_fir);
+#ifdef DEBUG
+	if (fir != priv->prev_fir) {
+		dev_dbg(mci->dev, "fir change : 0x%016lx\n", fir);
+		priv->prev_fir = fir;
+	}
+#endif
+	if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
+		addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
+		clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
+		cell_edac_count_ce(mci, 0, addreg);
+	}
+	if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
+		addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
+		clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
+		cell_edac_count_ce(mci, 1, addreg);
+	}
+	if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
+		addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
+		clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
+		cell_edac_count_ue(mci, 0, addreg);
+	}
+	if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
+		addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
+		clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
+		cell_edac_count_ue(mci, 1, addreg);
+	}
+
+	/* The procedure for clearing FIR bits is a bit ... weird */
+	if (clear) {
+		fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
+		fir |= CBE_MIC_FIR_ECC_RESET_MASK;
+		fir &= ~clear;
+		out_be64(&priv->regs->mic_fir, fir);
+		(void)in_be64(&priv->regs->mic_fir);
+
+		mb();	/* sync up */
+#ifdef DEBUG
+		fir = in_be64(&priv->regs->mic_fir);
+		dev_dbg(mci->dev, "fir clear  : 0x%016lx\n", fir);
+#endif
+	}
+}
+
+static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
+{
+	struct csrow_info		*csrow = &mci->csrows[0];
+	struct cell_edac_priv		*priv = mci->pvt_info;
+	struct device_node		*np;
+
+	for (np = NULL;
+	     (np = of_find_node_by_name(np, "memory")) != NULL;) {
+		struct resource r;
+
+		/* We "know" that the Cell firmware only creates one entry
+		 * in the "memory" nodes. If that changes, this code will
+		 * need to be adapted.
+		 */
+		if (of_address_to_resource(np, 0, &r))
+			continue;
+		if (of_node_to_nid(np) != priv->node)
+			continue;
+		csrow->first_page = r.start >> PAGE_SHIFT;
+		csrow->nr_pages = (r.end - r.start + 1) >> PAGE_SHIFT;
+		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+		csrow->mtype = MEM_XDR;
+		csrow->edac_mode = EDAC_SECDED;
+		dev_dbg(mci->dev,
+			"Initialized on node %d, chanmask=0x%x,"
+			" first_page=0x%lx, nr_pages=0x%x\n",
+			priv->node, priv->chanmask,
+			csrow->first_page, csrow->nr_pages);
+		break;
+	}
+}
+
+static int __devinit cell_edac_probe(struct platform_device *pdev)
+{
+	struct cbe_mic_tm_regs __iomem	*regs;
+	struct mem_ctl_info		*mci;
+	struct cell_edac_priv		*priv;
+	u64				reg;
+	int				rc, chanmask;
+
+	regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
+	if (regs == NULL)
+		return -ENODEV;
+
+	edac_op_state = EDAC_OPSTATE_POLL;
+
+	/* Get channel population */
+	reg = in_be64(&regs->mic_mnt_cfg);
+	dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg);
+	chanmask = 0;
+	if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
+		chanmask |= 0x1;
+	if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
+		chanmask |= 0x2;
+	if (chanmask == 0) {
+		dev_warn(&pdev->dev,
+			 "Yuck ! No channel populated ? Aborting !\n");
+		return -ENODEV;
+	}
+	dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n",
+		in_be64(&regs->mic_fir));
+
+	/* Allocate & init EDAC MC data structure */
+	mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
+			    chanmask == 3 ? 2 : 1, pdev->id);
+	if (mci == NULL)
+		return -ENOMEM;
+	priv = mci->pvt_info;
+	priv->regs = regs;
+	priv->node = pdev->id;
+	priv->chanmask = chanmask;
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_XDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+	mci->mod_name = "cell_edac";
+	mci->ctl_name = "MIC";
+	mci->dev_name = dev_name(&pdev->dev);
+	mci->edac_check = cell_edac_check;
+	cell_edac_init_csrows(mci);
+
+	/* Register with EDAC core */
+	rc = edac_mc_add_mc(mci);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to register with EDAC core\n");
+		edac_mc_free(mci);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int __devexit cell_edac_remove(struct platform_device *pdev)
+{
+	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+	if (mci)
+		edac_mc_free(mci);
+	return 0;
+}
+
+static struct platform_driver cell_edac_driver = {
+	.driver		= {
+		.name	= "cbe-mic",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= cell_edac_probe,
+	.remove		= __devexit_p(cell_edac_remove),
+};
+
+static int __init cell_edac_init(void)
+{
+	/* Sanity check registers data structure */
+	BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+			      mic_df_ecc_address_0) != 0xf8);
+	BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+			      mic_df_ecc_address_1) != 0x1b8);
+	BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+			      mic_df_config) != 0x218);
+	BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+			      mic_fir) != 0x230);
+	BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+			      mic_mnt_cfg) != 0x210);
+	BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+			      mic_exc) != 0x208);
+
+	return platform_driver_register(&cell_edac_driver);
+}
+
+static void __exit cell_edac_exit(void)
+{
+	platform_driver_unregister(&cell_edac_driver);
+}
+
+module_init(cell_edac_init);
+module_exit(cell_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("ECC counting for Cell MIC");
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
new file mode 100644
index 0000000..3d50274
--- /dev/null
+++ b/drivers/edac/cpc925_edac.c
@@ -0,0 +1,1017 @@
+/*
+ * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller.
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define CPC925_EDAC_REVISION	" Ver: 1.0.0 " __DATE__
+#define CPC925_EDAC_MOD_STR	"cpc925_edac"
+
+#define cpc925_printk(level, fmt, arg...) \
+	edac_printk(level, "CPC925", fmt, ##arg)
+
+#define cpc925_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg)
+
+/*
+ * CPC925 registers are 32 bits wide, with bit 0 defined as the
+ * most significant bit and bit 31 as the least significant.
+ */
+#define CPC925_BITS_PER_REG	32
+#define CPC925_BIT(nr)		(1UL << (CPC925_BITS_PER_REG - 1 - nr))
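+/*
+ * For illustration (not from the manual): under this convention
+ * CPC925_BIT(0) evaluates to 0x80000000 and CPC925_BIT(31) to
+ * 0x00000001, the reverse of the kernel's usual BIT() macro.
+ */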
+
+/*
+ * EDAC device names for the error detections of
+ * CPU Interface and Hypertransport Link.
+ */
+#define CPC925_CPU_ERR_DEV	"cpu"
+#define CPC925_HT_LINK_DEV	"htlink"
+
+/* Assume the DDR refresh cycle is 15.6 microseconds */
+#define CPC925_REF_FREQ		0xFA69
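+/*
+ * (1 / 15.6us is ~64103, i.e. 0xFA67, which is the literal actually
+ * used in cpc925_get_sdram_scrub_rate() below; 0xFA69 is presumably
+ * meant to be this same refresh frequency in Hz.)
+ */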
+#define CPC925_SCRUB_BLOCK_SIZE 64	/* bytes */
+#define CPC925_NR_CSROWS	8
+
+/*
+ * All registers and bits definitions are taken from
+ * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02".
+ */
+
+/*
+ * CPU and Memory Controller Registers
+ */
+/************************************************************
+ *	Processor Interface Exception Mask Register (APIMASK)
+ ************************************************************/
+#define REG_APIMASK_OFFSET	0x30070
+enum apimask_bits {
+	APIMASK_DART	= CPC925_BIT(0), /* DART Exception */
+	APIMASK_ADI0	= CPC925_BIT(1), /* Handshake Error on PI0_ADI */
+	APIMASK_ADI1	= CPC925_BIT(2), /* Handshake Error on PI1_ADI */
+	APIMASK_STAT	= CPC925_BIT(3), /* Status Exception */
+	APIMASK_DERR	= CPC925_BIT(4), /* Data Error Exception */
+	APIMASK_ADRS0	= CPC925_BIT(5), /* Addressing Exception on PI0 */
+	APIMASK_ADRS1	= CPC925_BIT(6), /* Addressing Exception on PI1 */
+					 /* BIT(7) Reserved */
+	APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
+	APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
+	APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
+	APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
+
+	CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 |
+			   APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 |
+			   APIMASK_ADRS1),
+	ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H |
+			   APIMASK_ECC_UE_L | APIMASK_ECC_CE_L),
+};
+
+/************************************************************
+ *	Processor Interface Exception Register (APIEXCP)
+ ************************************************************/
+#define REG_APIEXCP_OFFSET	0x30060
+enum apiexcp_bits {
+	APIEXCP_DART	= CPC925_BIT(0), /* DART Exception */
+	APIEXCP_ADI0	= CPC925_BIT(1), /* Handshake Error on PI0_ADI */
+	APIEXCP_ADI1	= CPC925_BIT(2), /* Handshake Error on PI1_ADI */
+	APIEXCP_STAT	= CPC925_BIT(3), /* Status Exception */
+	APIEXCP_DERR	= CPC925_BIT(4), /* Data Error Exception */
+	APIEXCP_ADRS0	= CPC925_BIT(5), /* Addressing Exception on PI0 */
+	APIEXCP_ADRS1	= CPC925_BIT(6), /* Addressing Exception on PI1 */
+					 /* BIT(7) Reserved */
+	APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
+	APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
+	APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
+	APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
+
+	CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 |
+			     APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 |
+			     APIEXCP_ADRS1),
+	UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L),
+	CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L),
+	ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED),
+};
+
+/************************************************************
+ *	Memory Bus Configuration Register (MBCR)
+************************************************************/
+#define REG_MBCR_OFFSET		0x2190
+#define MBCR_64BITCFG_SHIFT	23
+#define MBCR_64BITCFG_MASK	(1UL << MBCR_64BITCFG_SHIFT)
+#define MBCR_64BITBUS_SHIFT	22
+#define MBCR_64BITBUS_MASK	(1UL << MBCR_64BITBUS_SHIFT)
+
+/************************************************************
+ *	Memory Bank Mode Register (MBMR)
+************************************************************/
+#define REG_MBMR_OFFSET		0x21C0
+#define MBMR_MODE_MAX_VALUE	0xF
+#define MBMR_MODE_SHIFT		25
+#define MBMR_MODE_MASK		(MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT)
+#define MBMR_BBA_SHIFT		24
+#define MBMR_BBA_MASK		(1UL << MBMR_BBA_SHIFT)
+
+/************************************************************
+ *	Memory Bank Boundary Address Register (MBBAR)
+ ************************************************************/
+#define REG_MBBAR_OFFSET	0x21D0
+#define MBBAR_BBA_MAX_VALUE	0xFF
+#define MBBAR_BBA_SHIFT		24
+#define MBBAR_BBA_MASK		(MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT)
+
+/************************************************************
+ *	Memory Scrub Control Register (MSCR)
+ ************************************************************/
+#define REG_MSCR_OFFSET		0x2400
+#define MSCR_SCRUB_MOD_MASK	0xC0000000 /* scrub_mod - bit0:1*/
+#define MSCR_BACKGR_SCRUB	0x40000000 /* 01 */
+#define MSCR_SI_SHIFT		16 	/* si - bit8:15*/
+#define MSCR_SI_MAX_VALUE	0xFF
+#define MSCR_SI_MASK		(MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT)
+
+/************************************************************
+ *	Memory Scrub Range Start Register (MSRSR)
+ ************************************************************/
+#define REG_MSRSR_OFFSET	0x2410
+
+/************************************************************
+ *	Memory Scrub Range End Register (MSRER)
+ ************************************************************/
+#define REG_MSRER_OFFSET	0x2420
+
+/************************************************************
+ *	Memory Scrub Pattern Register (MSPR)
+ ************************************************************/
+#define REG_MSPR_OFFSET		0x2430
+
+/************************************************************
+ *	Memory Check Control Register (MCCR)
+ ************************************************************/
+#define REG_MCCR_OFFSET		0x2440
+enum mccr_bits {
+	MCCR_ECC_EN	= CPC925_BIT(0), /* ECC high and low check */
+};
+
+/************************************************************
+ *	Memory Check Range End Register (MCRER)
+ ************************************************************/
+#define REG_MCRER_OFFSET	0x2450
+
+/************************************************************
+ *	Memory Error Address Register (MEAR)
+ ************************************************************/
+#define REG_MEAR_OFFSET		0x2460
+#define MEAR_BCNT_MAX_VALUE	0x3
+#define MEAR_BCNT_SHIFT		30
+#define MEAR_BCNT_MASK		(MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT)
+#define MEAR_RANK_MAX_VALUE	0x7
+#define MEAR_RANK_SHIFT		27
+#define MEAR_RANK_MASK		(MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT)
+#define MEAR_COL_MAX_VALUE	0x7FF
+#define MEAR_COL_SHIFT		16
+#define MEAR_COL_MASK		(MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT)
+#define MEAR_BANK_MAX_VALUE	0x3
+#define MEAR_BANK_SHIFT		14
+#define MEAR_BANK_MASK		(MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT)
+#define MEAR_ROW_MASK		0x00003FFF
+
+/************************************************************
+ *	Memory Error Syndrome Register (MESR)
+ ************************************************************/
+#define REG_MESR_OFFSET		0x2470
+#define MESR_ECC_SYN_H_MASK	0xFF00
+#define MESR_ECC_SYN_L_MASK	0x00FF
+
+/************************************************************
+ *	Memory Mode Control Register (MMCR)
+ ************************************************************/
+#define REG_MMCR_OFFSET		0x2500
+enum mmcr_bits {
+	MMCR_REG_DIMM_MODE = CPC925_BIT(3),
+};
+
+/*
+ * HyperTransport Link Registers
+ */
+/************************************************************
+ *  Error Handling/Enumeration Scratch Pad Register (ERRCTRL)
+ ************************************************************/
+#define REG_ERRCTRL_OFFSET	0x70140
+enum errctrl_bits {			 /* nonfatal interrupts for */
+	ERRCTRL_SERR_NF	= CPC925_BIT(0), /* system error */
+	ERRCTRL_CRC_NF	= CPC925_BIT(1), /* CRC error */
+	ERRCTRL_RSP_NF	= CPC925_BIT(2), /* Response error */
+	ERRCTRL_EOC_NF	= CPC925_BIT(3), /* End-Of-Chain error */
+	ERRCTRL_OVF_NF	= CPC925_BIT(4), /* Overflow error */
+	ERRCTRL_PROT_NF	= CPC925_BIT(5), /* Protocol error */
+
+	ERRCTRL_RSP_ERR	= CPC925_BIT(6), /* Response error received */
+	ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */
+
+	HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF |
+			     ERRCTRL_RSP_NF | ERRCTRL_EOC_NF |
+			     ERRCTRL_OVF_NF | ERRCTRL_PROT_NF),
+	HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL),
+};
+
+/************************************************************
+ *  Link Configuration and Link Control Register (LINKCTRL)
+ ************************************************************/
+#define REG_LINKCTRL_OFFSET	0x70110
+enum linkctrl_bits {
+	LINKCTRL_CRC_ERR	= (CPC925_BIT(22) | CPC925_BIT(23)),
+	LINKCTRL_LINK_FAIL	= CPC925_BIT(27),
+
+	HT_LINKCTRL_DETECTED	= (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL),
+};
+
+/************************************************************
+ *  Link FreqCap/Error/Freq/Revision ID Register (LINKERR)
+ ************************************************************/
+#define REG_LINKERR_OFFSET	0x70120
+enum linkerr_bits {
+	LINKERR_EOC_ERR		= CPC925_BIT(17), /* End-Of-Chain error */
+	LINKERR_OVF_ERR		= CPC925_BIT(18), /* Receive Buffer Overflow */
+	LINKERR_PROT_ERR	= CPC925_BIT(19), /* Protocol error */
+
+	HT_LINKERR_DETECTED	= (LINKERR_EOC_ERR | LINKERR_OVF_ERR |
+				   LINKERR_PROT_ERR),
+};
+
+/************************************************************
+ *	Bridge Control Register (BRGCTRL)
+ ************************************************************/
+#define REG_BRGCTRL_OFFSET	0x70300
+enum brgctrl_bits {
+	BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */
+	BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */
+};
+
+/* Private structure for edac memory controller */
+struct cpc925_mc_pdata {
+	void __iomem *vbase;
+	unsigned long total_mem;
+	const char *name;
+	int edac_idx;
+};
+
+/* Private structure for common edac device */
+struct cpc925_dev_info {
+	void __iomem *vbase;
+	struct platform_device *pdev;
+	char *ctl_name;
+	int edac_idx;
+	struct edac_device_ctl_info *edac_dev;
+	void (*init)(struct cpc925_dev_info *dev_info);
+	void (*exit)(struct cpc925_dev_info *dev_info);
+	void (*check)(struct edac_device_ctl_info *edac_dev);
+};
+
+/* Get total memory size from Open Firmware DTB */
+static void get_total_mem(struct cpc925_mc_pdata *pdata)
+{
+	struct device_node *np = NULL;
+	const unsigned int *reg, *reg_end;
+	int len, sw, aw;
+	unsigned long start, size;
+
+	np = of_find_node_by_type(NULL, "memory");
+	if (!np)
+		return;
+
+	aw = of_n_addr_cells(np);
+	sw = of_n_size_cells(np);
+	reg = (const unsigned int *)of_get_property(np, "reg", &len);
+	if (!reg) {
+		of_node_put(np);
+		return;
+	}
+	reg_end = reg + len/4;
+
+	pdata->total_mem = 0;
+	do {
+		start = of_read_number(reg, aw);
+		reg += aw;
+		size = of_read_number(reg, sw);
+		reg += sw;
+		debugf1("%s: start 0x%lx, size 0x%lx\n", __func__,
+			start, size);
+		pdata->total_mem += size;
+	} while (reg < reg_end);
+
+	of_node_put(np);
+	debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem);
+}
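+
+/*
+ * For example (a sketch, not from an actual board's device tree):
+ * with #address-cells = <2> and #size-cells = <2>, a memory node
+ * property reg = <0x0 0x0 0x0 0x40000000> describes 1GB at address 0,
+ * and the loop above adds 0x40000000 to total_mem.
+ */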
+
+static void cpc925_init_csrows(struct mem_ctl_info *mci)
+{
+	struct cpc925_mc_pdata *pdata = mci->pvt_info;
+	struct csrow_info *csrow;
+	int index;
+	u32 mbmr, mbbar, bba;
+	unsigned long row_size, last_nr_pages = 0;
+
+	get_total_mem(pdata);
+
+	for (index = 0; index < mci->nr_csrows; index++) {
+		mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET +
+				   0x20 * index);
+		mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET +
+				   0x20 * index);
+		bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) |
+		       ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT);
+
+		if (bba == 0)
+			continue; /* not populated */
+
+		csrow = &mci->csrows[index];
+
+		row_size = bba * (1UL << 28);	/* 256M */
+		csrow->first_page = last_nr_pages;
+		csrow->nr_pages = row_size >> PAGE_SHIFT;
+		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+		last_nr_pages = csrow->last_page + 1;
+
+		csrow->mtype = MEM_RDDR;
+		csrow->edac_mode = EDAC_SECDED;
+
+		switch (csrow->nr_channels) {
+		case 1: /* Single channel */
+			csrow->grain = 32; /* four-beat burst of 32 bytes */
+			break;
+		case 2: /* Dual channel */
+		default:
+			csrow->grain = 64; /* four-beat burst of 64 bytes */
+			break;
+		}
+
+		switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
+		case 6: /* 0110, no way to differentiate X8 VS X16 */
+		case 5:	/* 0101 */
+		case 8: /* 1000 */
+			csrow->dtype = DEV_X16;
+			break;
+		case 7: /* 0111 */
+		case 9: /* 1001 */
+			csrow->dtype = DEV_X8;
+			break;
+		default:
+			csrow->dtype = DEV_UNKNOWN;
+			break;
+		}
+	}
+}
+
+/* Enable memory controller ECC detection */
+static void cpc925_mc_init(struct mem_ctl_info *mci)
+{
+	struct cpc925_mc_pdata *pdata = mci->pvt_info;
+	u32 apimask;
+	u32 mccr;
+
+	/* Enable various ECC error exceptions */
+	apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET);
+	if ((apimask & ECC_MASK_ENABLE) == 0) {
+		apimask |= ECC_MASK_ENABLE;
+		__raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET);
+	}
+
+	/* Enable ECC detection */
+	mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET);
+	if ((mccr & MCCR_ECC_EN) == 0) {
+		mccr |= MCCR_ECC_EN;
+		__raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET);
+	}
+}
+
+/* Disable memory controller ECC detection */
+static void cpc925_mc_exit(struct mem_ctl_info *mci)
+{
+	/*
+	 * WARNING:
+	 * We are supposed to clear the ECC error detection bits here,
+	 * and doing so would be no problem in itself. However, once
+	 * they are cleared, re-installing the CPC925 EDAC module later
+	 * and setting them up again in cpc925_mc_init() will trigger a
+	 * machine check exception.
+	 * It is also OK to leave the ECC error detection bits enabled,
+	 * since they are reset to 1 by default or by the boot loader.
+	 */
+
+	return;
+}
+
+/*
+ * Revert DDR column/row/bank addresses into page frame number and
+ * offset in page.
+ *
+ * Suppose the memory mode is 0x0111 (128-bit mode, identical DIMM pairs);
+ * the physical address (PA) to column address (CA) bit mappings are:
+ * CA	0   1   2   3   4   5   6   7   8   9   10
+ * PA	59  58  57  56  55  54  53  52  51  50  49
+ *
+ * the physical address (PA) to bank address (BA) bit mappings are:
+ * BA	0   1
+ * PA	43  44
+ *
+ * the physical address (PA) to row address (RA) bit mappings are:
+ * RA	0   1   2   3   4   5   6   7   8   9   10   11   12
+ * PA	36  35  34  48  47  46  45  40  41  42  39   38   37
+ */
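+/*
+ * Note: PA bits above are numbered with bit 0 as the most significant
+ * bit of a 64-bit address, matching the register convention. E.g.
+ * CA10 <-> PA49, i.e. bit (63 - 49) = 14 in LSB-first terms, which is
+ * the largest shift produced by the column loop below (assuming the
+ * hardware presents CA0 as the MSB of the MEAR column field).
+ */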
+static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
+		unsigned long *pfn, unsigned long *offset, int *csrow)
+{
+	u32 bcnt, rank, col, bank, row;
+	u32 c;
+	unsigned long pa;
+	int i;
+
+	bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT;
+	rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
+	col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT;
+	bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
+	row = mear & MEAR_ROW_MASK;
+
+	*csrow = rank;
+
+#ifdef CONFIG_EDAC_DEBUG
+	if (mci->csrows[rank].first_page == 0) {
+		cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
+			"non-populated csrow, broken hardware?\n");
+		return;
+	}
+#endif
+
+	/* Revert csrow number */
+	pa = mci->csrows[rank].first_page << PAGE_SHIFT;
+
+	/* Revert column address */
+	col += bcnt;
+	for (i = 0; i < 11; i++) {
+		c = col & 0x1;
+		col >>= 1;
+		pa |= c << (14 - i);
+	}
+
+	/* Revert bank address */
+	pa |= bank << 19;
+
+	/* Revert row address, in 4 steps */
+	for (i = 0; i < 3; i++) {
+		c = row & 0x1;
+		row >>= 1;
+		pa |= c << (26 - i);
+	}
+
+	for (i = 0; i < 3; i++) {
+		c = row & 0x1;
+		row >>= 1;
+		pa |= c << (21 + i);
+	}
+
+	for (i = 0; i < 4; i++) {
+		c = row & 0x1;
+		row >>= 1;
+		pa |= c << (18 - i);
+	}
+
+	for (i = 0; i < 3; i++) {
+		c = row & 0x1;
+		row >>= 1;
+		pa |= c << (29 - i);
+	}
+
+	*offset = pa & (PAGE_SIZE - 1);
+	*pfn = pa >> PAGE_SHIFT;
+
+	debugf0("%s: ECC physical address 0x%lx\n", __func__, pa);
+}
+
+static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
+{
+	if ((syndrome & MESR_ECC_SYN_H_MASK) == 0)
+		return 0;
+
+	if ((syndrome & MESR_ECC_SYN_L_MASK) == 0)
+		return 1;
+
+	cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n",
+			 syndrome);
+	return 1;
+}
+
+/* Check memory controller registers for ECC errors */
+static void cpc925_mc_check(struct mem_ctl_info *mci)
+{
+	struct cpc925_mc_pdata *pdata = mci->pvt_info;
+	u32 apiexcp;
+	u32 mear;
+	u32 mesr;
+	u16 syndrome;
+	unsigned long pfn = 0, offset = 0;
+	int csrow = 0, channel = 0;
+
+	/* APIEXCP is cleared when read */
+	apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET);
+	if ((apiexcp & ECC_EXCP_DETECTED) == 0)
+		return;
+
+	mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET);
+	syndrome = mesr & (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK);
+
+	mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET);
+
+	/* Revert column/row addresses into page frame number, etc */
+	cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);
+
+	if (apiexcp & CECC_EXCP_DETECTED) {
+		cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
+		channel = cpc925_mc_find_channel(mci, syndrome);
+		edac_mc_handle_ce(mci, pfn, offset, syndrome,
+				  csrow, channel, mci->ctl_name);
+	}
+
+	if (apiexcp & UECC_EXCP_DETECTED) {
+		cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
+		edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
+	}
+
+	cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
+	cpc925_mc_printk(mci, KERN_INFO, "APIMASK		0x%08x\n",
+		__raw_readl(pdata->vbase + REG_APIMASK_OFFSET));
+	cpc925_mc_printk(mci, KERN_INFO, "APIEXCP		0x%08x\n",
+		apiexcp);
+	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl	0x%08x\n",
+		__raw_readl(pdata->vbase + REG_MSCR_OFFSET));
+	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start	0x%08x\n",
+		__raw_readl(pdata->vbase + REG_MSRSR_OFFSET));
+	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End	0x%08x\n",
+		__raw_readl(pdata->vbase + REG_MSRER_OFFSET));
+	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern	0x%08x\n",
+		__raw_readl(pdata->vbase + REG_MSPR_OFFSET));
+	cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl		0x%08x\n",
+		__raw_readl(pdata->vbase + REG_MCCR_OFFSET));
+	cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End	0x%08x\n",
+		__raw_readl(pdata->vbase + REG_MCRER_OFFSET));
+	cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address	0x%08x\n",
+		mesr);
+	cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome	0x%08x\n",
+		syndrome);
+}
+
+/******************** CPU err device ********************************/
+/* Enable CPU Errors detection */
+static void cpc925_cpu_init(struct cpc925_dev_info *dev_info)
+{
+	u32 apimask;
+
+	apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
+	if ((apimask & CPU_MASK_ENABLE) == 0) {
+		apimask |= CPU_MASK_ENABLE;
+		__raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
+	}
+}
+
+/* Disable CPU Errors detection */
+static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info)
+{
+	/*
+	 * WARNING:
+	 * We are supposed to clear the CPU error detection bits here,
+	 * and doing so would be no problem in itself. However, once
+	 * they are cleared, re-installing the CPC925 EDAC module later
+	 * and setting them up again in cpc925_cpu_init() will trigger a
+	 * machine check exception.
+	 * It is also OK to leave the CPU error detection bits enabled,
+	 * since they are reset to 1 by default.
+	 */
+
+	return;
+}
+
+/* Check for CPU Errors */
+static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev)
+{
+	struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
+	u32 apiexcp;
+	u32 apimask;
+
+	/* APIEXCP is cleared when read */
+	apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET);
+	if ((apiexcp & CPU_EXCP_DETECTED) == 0)
+		return;
+
+	apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
+	cpc925_printk(KERN_INFO, "Processor Interface Fault\n"
+				 "Processor Interface register dump:\n");
+	cpc925_printk(KERN_INFO, "APIMASK		0x%08x\n", apimask);
+	cpc925_printk(KERN_INFO, "APIEXCP		0x%08x\n", apiexcp);
+
+	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+/******************** HT Link err device ****************************/
+/* Enable HyperTransport Link Error detection */
+static void cpc925_htlink_init(struct cpc925_dev_info *dev_info)
+{
+	u32 ht_errctrl;
+
+	ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
+	if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) {
+		ht_errctrl |= HT_ERRCTRL_ENABLE;
+		__raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
+	}
+}
+
+/* Disable HyperTransport Link Error detection */
+static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info)
+{
+	u32 ht_errctrl;
+
+	ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
+	ht_errctrl &= ~HT_ERRCTRL_ENABLE;
+	__raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
+}
+
+/* Check for HyperTransport Link errors */
+static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev)
+{
+	struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
+	u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET);
+	u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET);
+	u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
+	u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET);
+
+	if (!((brgctrl & BRGCTRL_DETSERR) ||
+	      (linkctrl & HT_LINKCTRL_DETECTED) ||
+	      (errctrl & HT_ERRCTRL_DETECTED) ||
+	      (linkerr & HT_LINKERR_DETECTED)))
+		return;
+
+	cpc925_printk(KERN_INFO, "HT Link Fault\n"
+				 "HT register dump:\n");
+	cpc925_printk(KERN_INFO, "Bridge Ctrl			0x%08x\n",
+		      brgctrl);
+	cpc925_printk(KERN_INFO, "Link Config Ctrl		0x%08x\n",
+		      linkctrl);
+	cpc925_printk(KERN_INFO, "Error Enum and Ctrl		0x%08x\n",
+		      errctrl);
+	cpc925_printk(KERN_INFO, "Link Error			0x%08x\n",
+		      linkerr);
+
+	/* Clear by write 1 */
+	if (brgctrl & BRGCTRL_DETSERR)
+		__raw_writel(BRGCTRL_DETSERR,
+				dev_info->vbase + REG_BRGCTRL_OFFSET);
+
+	if (linkctrl & HT_LINKCTRL_DETECTED)
+		__raw_writel(HT_LINKCTRL_DETECTED,
+				dev_info->vbase + REG_LINKCTRL_OFFSET);
+
+	/* Initiate Secondary Bus Reset to clear the chain failure */
+	if (errctrl & ERRCTRL_CHN_FAL)
+		__raw_writel(BRGCTRL_SECBUSRESET,
+				dev_info->vbase + REG_BRGCTRL_OFFSET);
+
+	if (errctrl & ERRCTRL_RSP_ERR)
+		__raw_writel(ERRCTRL_RSP_ERR,
+				dev_info->vbase + REG_ERRCTRL_OFFSET);
+
+	if (linkerr & HT_LINKERR_DETECTED)
+		__raw_writel(HT_LINKERR_DETECTED,
+				dev_info->vbase + REG_LINKERR_OFFSET);
+
+	edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static struct cpc925_dev_info cpc925_devs[] = {
+	{
+	.ctl_name = CPC925_CPU_ERR_DEV,
+	.init = cpc925_cpu_init,
+	.exit = cpc925_cpu_exit,
+	.check = cpc925_cpu_check,
+	},
+	{
+	.ctl_name = CPC925_HT_LINK_DEV,
+	.init = cpc925_htlink_init,
+	.exit = cpc925_htlink_exit,
+	.check = cpc925_htlink_check,
+	},
+	{0}, /* Terminated by NULL */
+};
+
+/*
+ * Add CPU Error detection and HyperTransport Link Error detection
+ * as common "edac_device"s. They have no corresponding device
+ * nodes in the Open Firmware DTB, so we have to add platform
+ * devices for them. Also, they share the MMIO region with that
+ * of the memory controller.
+ */
+static void cpc925_add_edac_devices(void __iomem *vbase)
+{
+	struct cpc925_dev_info *dev_info;
+
+	if (!vbase) {
+		cpc925_printk(KERN_ERR, "MMIO not established yet\n");
+		return;
+	}
+
+	for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
+		dev_info->vbase = vbase;
+		dev_info->pdev = platform_device_register_simple(
+					dev_info->ctl_name, 0, NULL, 0);
+		if (IS_ERR(dev_info->pdev)) {
+			cpc925_printk(KERN_ERR,
+				"Can't register platform device for %s\n",
+				dev_info->ctl_name);
+			continue;
+		}
+
+		/*
+		 * No need to allocate a private structure; make use
+		 * of the cpc925_devs[] entry instead.
+		 */
+		dev_info->edac_idx = edac_device_alloc_index();
+		dev_info->edac_dev =
+			edac_device_alloc_ctl_info(0, dev_info->ctl_name,
+				1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
+		if (!dev_info->edac_dev) {
+			cpc925_printk(KERN_ERR, "No memory for edac device\n");
+			goto err1;
+		}
+
+		dev_info->edac_dev->pvt_info = dev_info;
+		dev_info->edac_dev->dev = &dev_info->pdev->dev;
+		dev_info->edac_dev->ctl_name = dev_info->ctl_name;
+		dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR;
+		dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev);
+
+		if (edac_op_state == EDAC_OPSTATE_POLL)
+			dev_info->edac_dev->edac_check = dev_info->check;
+
+		if (dev_info->init)
+			dev_info->init(dev_info);
+
+		if (edac_device_add_device(dev_info->edac_dev) > 0) {
+			cpc925_printk(KERN_ERR,
+				"Unable to add edac device for %s\n",
+				dev_info->ctl_name);
+			goto err2;
+		}
+
+		debugf0("%s: Successfully added edac device for %s\n",
+			__func__, dev_info->ctl_name);
+
+		continue;
+
+err2:
+		if (dev_info->exit)
+			dev_info->exit(dev_info);
+		edac_device_free_ctl_info(dev_info->edac_dev);
+err1:
+		platform_device_unregister(dev_info->pdev);
+	}
+}
+
+/*
+ * Delete the common "edac_device" for CPU Err Detection
+ * and HyperTransport Link Err Detection
+ */
+static void cpc925_del_edac_devices(void)
+{
+	struct cpc925_dev_info *dev_info;
+
+	for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
+		if (dev_info->edac_dev) {
+			edac_device_del_device(dev_info->edac_dev->dev);
+			edac_device_free_ctl_info(dev_info->edac_dev);
+			platform_device_unregister(dev_info->pdev);
+		}
+
+		if (dev_info->exit)
+			dev_info->exit(dev_info);
+
+		debugf0("%s: Successfully deleted edac device for %s\n",
+			__func__, dev_info->ctl_name);
+	}
+}
+
+/* Convert the current background scrub rate into a bytes/sec bandwidth */
+static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+{
+	struct cpc925_mc_pdata *pdata = mci->pvt_info;
+	u32 mscr;
+	u8 si;
+
+	mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
+	si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
+
+	debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr);
+
+	if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
+	    (si == 0)) {
+		cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
+		*bw = 0;
+	} else
+		*bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
+
+	return 0;
+}
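+
+/*
+ * A worked example of the conversion above: with the 64-byte scrub
+ * block size, si == 0xFF yields 64 * 0xFA67 / 255, roughly 16 KB/s.
+ */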
+
+/* Return 0 for single channel; 1 for dual channel */
+static int cpc925_mc_get_channels(void __iomem *vbase)
+{
+	int dual = 0;
+	u32 mbcr;
+
+	mbcr = __raw_readl(vbase + REG_MBCR_OFFSET);
+
+	/*
+	 * Dual channel only when 128-bit wide physical bus
+	 * and 128-bit configuration.
+	 */
+	if (((mbcr & MBCR_64BITCFG_MASK) == 0) &&
+	    ((mbcr & MBCR_64BITBUS_MASK) == 0))
+		dual = 1;
+
+	debugf0("%s: %s channel\n", __func__,
+		(dual > 0) ? "Dual" : "Single");
+
+	return dual;
+}
+
+static int __devinit cpc925_probe(struct platform_device *pdev)
+{
+	static int edac_mc_idx;
+	struct mem_ctl_info *mci;
+	void __iomem *vbase;
+	struct cpc925_mc_pdata *pdata;
+	struct resource *r;
+	int res = 0, nr_channels;
+
+	debugf0("%s: %s platform device found!\n", __func__, pdev->name);
+
+	if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
+		res = -ENOMEM;
+		goto out;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		cpc925_printk(KERN_ERR, "Unable to get resource\n");
+		res = -ENOENT;
+		goto err1;
+	}
+
+	if (!devm_request_mem_region(&pdev->dev,
+				     r->start,
+				     resource_size(r),
+				     pdev->name)) {
+		cpc925_printk(KERN_ERR, "Unable to request mem region\n");
+		res = -EBUSY;
+		goto err1;
+	}
+
+	vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+	if (!vbase) {
+		cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
+		res = -ENOMEM;
+		goto err2;
+	}
+
+	nr_channels = cpc925_mc_get_channels(vbase);
+	mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
+			CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
+	if (!mci) {
+		cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
+		res = -ENOMEM;
+		goto err2;
+	}
+
+	pdata = mci->pvt_info;
+	pdata->vbase = vbase;
+	pdata->edac_idx = edac_mc_idx++;
+	pdata->name = pdev->name;
+
+	mci->dev = &pdev->dev;
+	platform_set_drvdata(pdev, mci);
+	mci->dev_name = dev_name(&pdev->dev);
+	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_SECDED;
+	mci->mod_name = CPC925_EDAC_MOD_STR;
+	mci->mod_ver = CPC925_EDAC_REVISION;
+	mci->ctl_name = pdev->name;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		mci->edac_check = cpc925_mc_check;
+
+	mci->ctl_page_to_phys = NULL;
+	mci->scrub_mode = SCRUB_SW_SRC;
+	mci->set_sdram_scrub_rate = NULL;
+	mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate;
+
+	cpc925_init_csrows(mci);
+
+	/* Setup memory controller registers */
+	cpc925_mc_init(mci);
+
+	if (edac_mc_add_mc(mci) > 0) {
+		cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n");
+		res = -ENODEV;
+		goto err3;
+	}
+
+	cpc925_add_edac_devices(vbase);
+
+	/* get this far and it's successful */
+	debugf0("%s: success\n", __func__);
+
+	res = 0;
+	goto out;
+
+err3:
+	cpc925_mc_exit(mci);
+	edac_mc_free(mci);
+err2:
+	devm_release_mem_region(&pdev->dev, r->start, resource_size(r));
+err1:
+	devres_release_group(&pdev->dev, cpc925_probe);
+out:
+	return res;
+}
+
+static int cpc925_remove(struct platform_device *pdev)
+{
+	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+	/*
+	 * Delete common edac devices before edac mc, because
+	 * the former share the MMIO of the latter.
+	 */
+	cpc925_del_edac_devices();
+	cpc925_mc_exit(mci);
+
+	edac_mc_del_mc(&pdev->dev);
+	edac_mc_free(mci);
+
+	return 0;
+}
+
+static struct platform_driver cpc925_edac_driver = {
+	.probe = cpc925_probe,
+	.remove = cpc925_remove,
+	.driver = {
+		   .name = "cpc925_edac",
+	}
+};
+
+static int __init cpc925_edac_init(void)
+{
+	int ret = 0;
+
+	printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n");
+	printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n");
+
+	/* Only support POLL mode so far */
+	edac_op_state = EDAC_OPSTATE_POLL;
+
+	ret = platform_driver_register(&cpc925_edac_driver);
+	if (ret) {
+		printk(KERN_WARNING "Failed to register %s\n",
+			CPC925_EDAC_MOD_STR);
+	}
+
+	return ret;
+}
+
+static void __exit cpc925_edac_exit(void)
+{
+	platform_driver_unregister(&cpc925_edac_driver);
+}
+
+module_init(cpc925_edac_init);
+module_exit(cpc925_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
+MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
new file mode 100644
index 0000000..d205d49
--- /dev/null
+++ b/drivers/edac/e752x_edac.c
@@ -0,0 +1,1351 @@
+/*
+ * Intel e752x Memory Controller kernel module
+ * (C) 2004 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * See "enum e752x_chips" below for supported chipsets
+ *
+ * Written by Tom Zimmerman
+ *
+ * Contributors:
+ * 	Thayne Harbaugh at realmsys.com (?)
+ * 	Wang Zhenyu at intel.com
+ * 	Dave Jiang at mvista.com
+ *
+ * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define E752X_REVISION	" Ver: 2.0.2 " __DATE__
+#define EDAC_MOD_STR	"e752x_edac"
+
+static int report_non_memory_errors;
+static int force_function_unhide;
+static int sysbus_parity = -1;
+
+static struct edac_pci_ctl_info *e752x_pci;
+
+#define e752x_printk(level, fmt, arg...) \
+	edac_printk(level, "e752x", fmt, ##arg)
+
+#define e752x_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_7520_0
+#define PCI_DEVICE_ID_INTEL_7520_0      0x3590
+#endif				/* PCI_DEVICE_ID_INTEL_7520_0      */
+
+#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
+#define PCI_DEVICE_ID_INTEL_7520_1_ERR  0x3591
+#endif				/* PCI_DEVICE_ID_INTEL_7520_1_ERR  */
+
+#ifndef PCI_DEVICE_ID_INTEL_7525_0
+#define PCI_DEVICE_ID_INTEL_7525_0      0x359E
+#endif				/* PCI_DEVICE_ID_INTEL_7525_0      */
+
+#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
+#define PCI_DEVICE_ID_INTEL_7525_1_ERR  0x3593
+#endif				/* PCI_DEVICE_ID_INTEL_7525_1_ERR  */
+
+#ifndef PCI_DEVICE_ID_INTEL_7320_0
+#define PCI_DEVICE_ID_INTEL_7320_0	0x3592
+#endif				/* PCI_DEVICE_ID_INTEL_7320_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
+#define PCI_DEVICE_ID_INTEL_7320_1_ERR	0x3593
+#endif				/* PCI_DEVICE_ID_INTEL_7320_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_3100_0
+#define PCI_DEVICE_ID_INTEL_3100_0	0x35B0
+#endif				/* PCI_DEVICE_ID_INTEL_3100_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
+#define PCI_DEVICE_ID_INTEL_3100_1_ERR	0x35B1
+#endif				/* PCI_DEVICE_ID_INTEL_3100_1_ERR */
+
+#define E752X_NR_CSROWS		8	/* number of csrows */
+
+/* E752X register addresses - device 0 function 0 */
+#define E752X_DRB		0x60	/* DRAM row boundary register (8b) */
+#define E752X_DRA		0x70	/* DRAM row attribute register (8b) */
+					/*
+					 * 31:30   Device width row 7
+					 *      01=x8 10=x4 11=x8 DDR2
+					 * 27:26   Device width row 6
+					 * 23:22   Device width row 5
+					 * 19:18   Device width row 4
+					 * 15:14   Device width row 3
+					 * 11:10   Device width row 2
+					 *  7:6    Device width row 1
+					 *  3:2    Device width row 0
+					 */
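+					/*
+					 * i.e. the width field for row n
+					 * can be read as
+					 * (dra >> (n * 4 + 2)) & 0x3
+					 * (a reading of the layout above,
+					 * not code from this driver)
+					 */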
+#define E752X_DRC		0x7C	/* DRAM controller mode reg (32b) */
+					/* FIXME:IS THIS RIGHT? */
+					/*
+					 * 22    Number channels 0=1,1=2
+					 * 19:18 DRB Granularity 32/64MB
+					 */
+#define E752X_DRM		0x80	/* Dimm mapping register */
+#define E752X_DDRCSR		0x9A	/* DDR control and status reg (16b) */
+					/*
+					 * 14:12 1 single A, 2 single B, 3 dual
+					 */
+#define E752X_TOLM		0xC4	/* DRAM top of low memory reg (16b) */
+#define E752X_REMAPBASE		0xC6	/* DRAM remap base address reg (16b) */
+#define E752X_REMAPLIMIT	0xC8	/* DRAM remap limit address reg (16b) */
+#define E752X_REMAPOFFSET	0xCA	/* DRAM remap limit offset reg (16b) */
+
+/* E752X register addresses - device 0 function 1 */
+#define E752X_FERR_GLOBAL	0x40	/* Global first error register (32b) */
+#define E752X_NERR_GLOBAL	0x44	/* Global next error register (32b) */
+#define E752X_HI_FERR		0x50	/* Hub interface first error reg (8b) */
+#define E752X_HI_NERR		0x52	/* Hub interface next error reg (8b) */
+#define E752X_HI_ERRMASK	0x54	/* Hub interface error mask reg (8b) */
+#define E752X_HI_SMICMD		0x5A	/* Hub interface SMI command reg (8b) */
+#define E752X_SYSBUS_FERR	0x60	/* System bus first error reg (16b) */
+#define E752X_SYSBUS_NERR	0x62	/* System bus next error reg (16b) */
+#define E752X_SYSBUS_ERRMASK	0x64	/* System bus error mask reg (16b) */
+#define E752X_SYSBUS_SMICMD	0x6A	/* System bus SMI command reg (16b) */
+#define E752X_BUF_FERR		0x70	/* Memory buffer first error reg (8b) */
+#define E752X_BUF_NERR		0x72	/* Memory buffer next error reg (8b) */
+#define E752X_BUF_ERRMASK	0x74	/* Memory buffer error mask reg (8b) */
+#define E752X_BUF_SMICMD	0x7A	/* Memory buffer SMI cmd reg (8b) */
+#define E752X_DRAM_FERR		0x80	/* DRAM first error register (16b) */
+#define E752X_DRAM_NERR		0x82	/* DRAM next error register (16b) */
+#define E752X_DRAM_ERRMASK	0x84	/* DRAM error mask register (8b) */
+#define E752X_DRAM_SMICMD	0x8A	/* DRAM SMI command register (8b) */
+#define E752X_DRAM_RETR_ADD	0xAC	/* DRAM Retry address register (32b) */
+#define E752X_DRAM_SEC1_ADD	0xA0	/* DRAM first correctable memory */
+					/*     error address register (32b) */
+					/*
+					 * 31    Reserved
+					 * 30:2  CE address (64 byte block 34:6)
+					 * 1     Reserved
+					 * 0     HiLoCS
+					 */
+#define E752X_DRAM_SEC2_ADD	0xC8	/* DRAM second correctable memory */
+					/*     error address register (32b) */
+					/*
+					 * 31    Reserved
+					 * 30:2  CE address (64 byte block 34:6)
+					 * 1     Reserved
+					 * 0     HiLoCS
+					 */
+#define E752X_DRAM_DED_ADD	0xA4	/* DRAM first uncorrectable memory */
+					/*     error address register (32b) */
+					/*
+					 * 31    Reserved
+					 * 30:2  UE address (64 byte block 34:6)
+					 * 1     Reserved
+					 * 0     HiLoCS
+					 */
+#define E752X_DRAM_SCRB_ADD	0xA8	/* DRAM 1st uncorrectable scrub mem */
+					/*     error address register (32b) */
+					/*
+					 * 31    Reserved
+					 * 30:2  UE address (64 byte block 34:6)
+					 * 1     Reserved
+					 * 0     HiLoCS
+					 */
+#define E752X_DRAM_SEC1_SYNDROME 0xC4	/* DRAM first correctable memory */
+					/*     error syndrome register (16b) */
+#define E752X_DRAM_SEC2_SYNDROME 0xC6	/* DRAM second correctable memory */
+					/*     error syndrome register (16b) */
+#define E752X_DEVPRES1		0xF4	/* Device Present 1 register (8b) */
+
+/* 3100 IMCH specific register addresses - device 0 function 1 */
+#define I3100_NSI_FERR		0x48	/* NSI first error reg (32b) */
+#define I3100_NSI_NERR		0x4C	/* NSI next error reg (32b) */
+#define I3100_NSI_SMICMD	0x54	/* NSI SMI command register (32b) */
+#define I3100_NSI_EMASK		0x90	/* NSI error mask register (32b) */
+
+/* ICH5R register addresses - device 30 function 0 */
+#define ICH5R_PCI_STAT		0x06	/* PCI status register (16b) */
+#define ICH5R_PCI_2ND_STAT	0x1E	/* PCI status secondary reg (16b) */
+#define ICH5R_PCI_BRIDGE_CTL	0x3E	/* PCI bridge control register (16b) */
+
+enum e752x_chips {
+	E7520 = 0,
+	E7525 = 1,
+	E7320 = 2,
+	I3100 = 3
+};
+
+struct e752x_pvt {
+	struct pci_dev *bridge_ck;
+	struct pci_dev *dev_d0f0;
+	struct pci_dev *dev_d0f1;
+	u32 tolm;
+	u32 remapbase;
+	u32 remaplimit;
+	int mc_symmetric;
+	u8 map[8];
+	int map_type;
+	const struct e752x_dev_info *dev_info;
+};
+
+struct e752x_dev_info {
+	u16 err_dev;
+	u16 ctl_dev;
+	const char *ctl_name;
+};
+
+struct e752x_error_info {
+	u32 ferr_global;
+	u32 nerr_global;
+	u32 nsi_ferr;	/* 3100 only */
+	u32 nsi_nerr;	/* 3100 only */
+	u8 hi_ferr;	/* all but 3100 */
+	u8 hi_nerr;	/* all but 3100 */
+	u16 sysbus_ferr;
+	u16 sysbus_nerr;
+	u8 buf_ferr;
+	u8 buf_nerr;
+	u16 dram_ferr;
+	u16 dram_nerr;
+	u32 dram_sec1_add;
+	u32 dram_sec2_add;
+	u16 dram_sec1_syndrome;
+	u16 dram_sec2_syndrome;
+	u32 dram_ded_add;
+	u32 dram_scrb_add;
+	u32 dram_retr_add;
+};
+
+static const struct e752x_dev_info e752x_devs[] = {
+	[E7520] = {
+		.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
+		.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
+		.ctl_name = "E7520"},
+	[E7525] = {
+		.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
+		.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
+		.ctl_name = "E7525"},
+	[E7320] = {
+		.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
+		.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
+		.ctl_name = "E7320"},
+	[I3100] = {
+		.err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
+		.ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
+		.ctl_name = "3100"},
+};
+
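+/*
+ * ctl_page_to_phys() undoes the chipset's high-memory remap in 4 KiB page
+ * units.  Illustrative values only: with tolm = 0xc0000 (3 GiB),
+ * remapbase = 0x100000 (4 GiB) and remaplimit = 0x140000 (5 GiB), a logged
+ * page of 0xd0000 remaps to (0xd0000 - 0xc0000) + 0x100000 = 0x110000.
+ */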
+static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
+				unsigned long page)
+{
+	u32 remap;
+	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
+
+	debugf3("%s()\n", __func__);
+
+	if (page < pvt->tolm)
+		return page;
+
+	if ((page >= 0x100000) && (page < pvt->remapbase))
+		return page;
+
+	remap = (page - pvt->tolm) + pvt->remapbase;
+
+	if (remap < pvt->remaplimit)
+		return remap;
+
+	e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
+	return pvt->tolm - 1;
+}
+
+static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
+			u32 sec1_add, u16 sec1_syndrome)
+{
+	u32 page;
+	int row;
+	int channel;
+	int i;
+	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
+
+	debugf3("%s()\n", __func__);
+
+	/* convert the addr to 4k page */
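+	/*
+	 * SEC1_ADD holds physical address bits 34:6 in register bits 30:2
+	 * (see the register comment above), i.e. roughly the byte address
+	 * shifted right by 4; a further shift of (PAGE_SHIFT - 4) yields
+	 * the 4 KiB page number (sec1_add >> 8 for PAGE_SHIFT == 12), and
+	 * (sec1_add << 4) below recovers the byte offset bits for
+	 * offset_in_page().
+	 */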
+	page = sec1_add >> (PAGE_SHIFT - 4);
+
+	/* FIXME - check for -1 */
+	if (pvt->mc_symmetric) {
+		/* chip select are bits 14 & 13 */
+		row = ((page >> 1) & 3);
+		e752x_printk(KERN_WARNING,
+			"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
+			pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
+			pvt->map[4], pvt->map[5], pvt->map[6],
+			pvt->map[7]);
+
+		/* test for channel remapping */
+		for (i = 0; i < 8; i++) {
+			if (pvt->map[i] == row)
+				break;
+		}
+
+		e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
+
+		if (i < 8)
+			row = i;
+		else
+			e752x_mc_printk(mci, KERN_WARNING,
+					"row %d not found in remap table\n",
+					row);
+	} else
+		row = edac_mc_find_csrow_by_page(mci, page);
+
+	/* 0 = channel A, 1 = channel B */
+	channel = !(error_one & 1);
+
+	/* e752x mc reads 34:6 of the DRAM linear address */
+	edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
+			sec1_syndrome, row, channel, "e752x CE");
+}
+
+static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
+			u32 sec1_add, u16 sec1_syndrome, int *error_found,
+			int handle_error)
+{
+	*error_found = 1;
+
+	if (handle_error)
+		do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
+}
+
+static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
+			u32 ded_add, u32 scrb_add)
+{
+	u32 error_2b, block_page;
+	int row;
+	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
+
+	debugf3("%s()\n", __func__);
+
+	if (error_one & 0x0202) {
+		error_2b = ded_add;
+
+		/* convert to 4k address */
+		block_page = error_2b >> (PAGE_SHIFT - 4);
+
+		row = pvt->mc_symmetric ?
+		/* chip select are bits 14 & 13 */
+			((block_page >> 1) & 3) :
+			edac_mc_find_csrow_by_page(mci, block_page);
+
+		/* e752x mc reads 34:6 of the DRAM linear address */
+		edac_mc_handle_ue(mci, block_page,
+				offset_in_page(error_2b << 4),
+				row, "e752x UE from Read");
+	}
+	if (error_one & 0x0404) {
+		error_2b = scrb_add;
+
+		/* convert to 4k address */
+		block_page = error_2b >> (PAGE_SHIFT - 4);
+
+		row = pvt->mc_symmetric ?
+		/* chip select are bits 14 & 13 */
+			((block_page >> 1) & 3) :
+			edac_mc_find_csrow_by_page(mci, block_page);
+
+		/* e752x mc reads 34:6 of the DRAM linear address */
+		edac_mc_handle_ue(mci, block_page,
+				offset_in_page(error_2b << 4),
+				row, "e752x UE from Scrubber");
+	}
+}
+
+static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
+			u32 ded_add, u32 scrb_add, int *error_found,
+			int handle_error)
+{
+	*error_found = 1;
+
+	if (handle_error)
+		do_process_ue(mci, error_one, ded_add, scrb_add);
+}
+
+static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
+					 int *error_found, int handle_error)
+{
+	*error_found = 1;
+
+	if (!handle_error)
+		return;
+
+	debugf3("%s()\n", __func__);
+	edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+}
+
+static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
+				 u32 retry_add)
+{
+	u32 error_1b, page;
+	int row;
+	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
+
+	error_1b = retry_add;
+	page = error_1b >> (PAGE_SHIFT - 4);  /* convert the addr to 4k page */
+
+	/* chip select are bits 14 & 13 */
+	row = pvt->mc_symmetric ? ((page >> 1) & 3) :
+		edac_mc_find_csrow_by_page(mci, page);
+
+	e752x_mc_printk(mci, KERN_WARNING,
+			"CE page 0x%lx, row %d : Memory read retry\n",
+			(unsigned long)page, row);
+}
+
+static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
+				u32 retry_add, int *error_found,
+				int handle_error)
+{
+	*error_found = 1;
+
+	if (handle_error)
+		do_process_ded_retry(mci, error, retry_add);
+}
+
+static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
+					int *error_found, int handle_error)
+{
+	*error_found = 1;
+
+	if (handle_error)
+		e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
+}
+
+static char *global_message[11] = {
+	"PCI Express C1",
+	"PCI Express C",
+	"PCI Express B1",
+	"PCI Express B",
+	"PCI Express A1",
+	"PCI Express A",
+	"DMA Controller",
+	"HUB or NS Interface",
+	"System Bus",
+	"DRAM Controller",	/* index 9 == DRAM_ENTRY */
+	"Internal Buffer"
+};
+
+#define DRAM_ENTRY	9
+
+static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
+
+static void do_global_error(int fatal, u32 errors)
+{
+	int i;
+
+	for (i = 0; i < 11; i++) {
+		if (errors & (1 << i)) {
+			/* If the error is from DRAM Controller OR
+			 * we are to report ALL errors, then
+			 * report the error
+			 */
+			if ((i == DRAM_ENTRY) || report_non_memory_errors)
+				e752x_printk(KERN_WARNING, "%sError %s\n",
+					fatal_message[fatal],
+					global_message[i]);
+		}
+	}
+}
+
+static inline void global_error(int fatal, u32 errors, int *error_found,
+				int handle_error)
+{
+	*error_found = 1;
+
+	if (handle_error)
+		do_global_error(fatal, errors);
+}
+
+static char *hub_message[7] = {
+	"HI Address or Command Parity", "HI Illegal Access",
+	"HI Internal Parity", "Out of Range Access",
+	"HI Data Parity", "Enhanced Config Access",
+	"Hub Interface Target Abort"
+};
+
+static void do_hub_error(int fatal, u8 errors)
+{
+	int i;
+
+	for (i = 0; i < 7; i++) {
+		if (errors & (1 << i))
+			e752x_printk(KERN_WARNING, "%sError %s\n",
+				fatal_message[fatal], hub_message[i]);
+	}
+}
+
+static inline void hub_error(int fatal, u8 errors, int *error_found,
+			int handle_error)
+{
+	*error_found = 1;
+
+	if (handle_error)
+		do_hub_error(fatal, errors);
+}
+
+#define NSI_FATAL_MASK		0x0c080081
+#define NSI_NON_FATAL_MASK	0x23a0ba64
+#define NSI_ERR_MASK		(NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
+
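+/*
+ * Decoded, NSI_FATAL_MASK selects bits 0, 7, 19, 26 and 27 of the
+ * nsi_message[] table below, and NSI_NON_FATAL_MASK selects bits 2, 5, 6,
+ * 9, 11, 12, 13, 15, 21, 23, 24, 25 and 29; the remaining bits are
+ * reserved.
+ */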
+static char *nsi_message[30] = {
+	"NSI Link Down",	/* NSI_FERR/NSI_NERR bit 0, fatal error */
+	"",						/* reserved */
+	"NSI Parity Error",				/* bit 2, non-fatal */
+	"",						/* reserved */
+	"",						/* reserved */
+	"Correctable Error Message",			/* bit 5, non-fatal */
+	"Non-Fatal Error Message",			/* bit 6, non-fatal */
+	"Fatal Error Message",				/* bit 7, fatal */
+	"",						/* reserved */
+	"Receiver Error",				/* bit 9, non-fatal */
+	"",						/* reserved */
+	"Bad TLP",					/* bit 11, non-fatal */
+	"Bad DLLP",					/* bit 12, non-fatal */
+	"REPLAY_NUM Rollover",				/* bit 13, non-fatal */
+	"",						/* reserved */
+	"Replay Timer Timeout",				/* bit 15, non-fatal */
+	"",						/* reserved */
+	"",						/* reserved */
+	"",						/* reserved */
+	"Data Link Protocol Error",			/* bit 19, fatal */
+	"",						/* reserved */
+	"Poisoned TLP",					/* bit 21, non-fatal */
+	"",						/* reserved */
+	"Completion Timeout",				/* bit 23, non-fatal */
+	"Completer Abort",				/* bit 24, non-fatal */
+	"Unexpected Completion",			/* bit 25, non-fatal */
+	"Receiver Overflow",				/* bit 26, fatal */
+	"Malformed TLP",				/* bit 27, fatal */
+	"",						/* reserved */
+	"Unsupported Request"				/* bit 29, non-fatal */
+};
+
+static void do_nsi_error(int fatal, u32 errors)
+{
+	int i;
+
+	for (i = 0; i < 30; i++) {
+		if (errors & (1 << i))
+			printk(KERN_WARNING "%sError %s\n",
+			       fatal_message[fatal], nsi_message[i]);
+	}
+}
+
+static inline void nsi_error(int fatal, u32 errors, int *error_found,
+		int handle_error)
+{
+	*error_found = 1;
+
+	if (handle_error)
+		do_nsi_error(fatal, errors);
+}
+
+static char *membuf_message[4] = {
+	"Internal PMWB to DRAM parity",
+	"Internal PMWB to System Bus Parity",
+	"Internal System Bus or IO to PMWB Parity",
+	"Internal DRAM to PMWB Parity"
+};
+
+static void do_membuf_error(u8 errors)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if (errors & (1 << i))
+			e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
+				membuf_message[i]);
+	}
+}
+
+static inline void membuf_error(u8 errors, int *error_found, int handle_error)
+{
+	*error_found = 1;
+
+	if (handle_error)
+		do_membuf_error(errors);
+}
+
+static char *sysbus_message[10] = {
+	"Addr or Request Parity",
+	"Data Strobe Glitch",
+	"Addr Strobe Glitch",
+	"Data Parity",
+	"Addr Above TOM",
+	"Non DRAM Lock Error",
+	"MCERR", "BINIT",
+	"Memory Parity",
+	"IO Subsystem Parity"
+};
+
+static void do_sysbus_error(int fatal, u32 errors)
+{
+	int i;
+
+	for (i = 0; i < 10; i++) {
+		if (errors & (1 << i))
+			e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
+				fatal_message[fatal], sysbus_message[i]);
+	}
+}
+
+static inline void sysbus_error(int fatal, u32 errors, int *error_found,
+				int handle_error)
+{
+	*error_found = 1;
+
+	if (handle_error)
+		do_sysbus_error(fatal, errors);
+}
+
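+/*
+ * HI_FERR/HI_NERR error bits, as masked below: 0x2b (bits 0, 1, 3, 5)
+ * selects the hub_message[] entries reported as fatal, 0x54 (bits 2, 4, 6)
+ * those reported as non-fatal; together they form the 0x7f valid-error
+ * mask.
+ */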
+static void e752x_check_hub_interface(struct e752x_error_info *info,
+				int *error_found, int handle_error)
+{
+	u8 stat8;
+
+	//pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
+
+	stat8 = info->hi_ferr;
+
+	if (stat8 & 0x7f) {	/* Error, so process */
+		stat8 &= 0x7f;
+
+		if (stat8 & 0x2b)
+			hub_error(1, stat8 & 0x2b, error_found, handle_error);
+
+		if (stat8 & 0x54)
+			hub_error(0, stat8 & 0x54, error_found, handle_error);
+	}
+	//pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
+
+	stat8 = info->hi_nerr;
+
+	if (stat8 & 0x7f) {	/* Error, so process */
+		stat8 &= 0x7f;
+
+		if (stat8 & 0x2b)
+			hub_error(1, stat8 & 0x2b, error_found, handle_error);
+
+		if (stat8 & 0x54)
+			hub_error(0, stat8 & 0x54, error_found, handle_error);
+	}
+}
+
+static void e752x_check_ns_interface(struct e752x_error_info *info,
+				int *error_found, int handle_error)
+{
+	u32 stat32;
+
+	stat32 = info->nsi_ferr;
+	if (stat32 & NSI_ERR_MASK) { /* Error, so process */
+		if (stat32 & NSI_FATAL_MASK)	/* check for fatal errors */
+			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
+				  handle_error);
+		if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
+			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
+				  handle_error);
+	}
+	stat32 = info->nsi_nerr;
+	if (stat32 & NSI_ERR_MASK) {
+		if (stat32 & NSI_FATAL_MASK)
+			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
+				  handle_error);
+		if (stat32 & NSI_NON_FATAL_MASK)
+			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
+				  handle_error);
+	}
+}
+
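+/*
+ * The sysbus FERR/NERR registers are combined into one 32-bit word below
+ * (FERR in the low half, NERR in the high half).  Of the ten status bits,
+ * mask 0x087 (bits 0-2, 7) is reported as fatal and mask 0x378
+ * (bits 3-6, 8-9) as non-fatal.
+ */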
+static void e752x_check_sysbus(struct e752x_error_info *info,
+			int *error_found, int handle_error)
+{
+	u32 stat32, error32;
+
+	//pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
+	stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
+
+	if (stat32 == 0)
+		return;		/* no errors */
+
+	error32 = (stat32 >> 16) & 0x3ff;
+	stat32 = stat32 & 0x3ff;
+
+	if (stat32 & 0x087)
+		sysbus_error(1, stat32 & 0x087, error_found, handle_error);
+
+	if (stat32 & 0x378)
+		sysbus_error(0, stat32 & 0x378, error_found, handle_error);
+
+	if (error32 & 0x087)
+		sysbus_error(1, error32 & 0x087, error_found, handle_error);
+
+	if (error32 & 0x378)
+		sysbus_error(0, error32 & 0x378, error_found, handle_error);
+}
+
+static void e752x_check_membuf(struct e752x_error_info *info,
+			int *error_found, int handle_error)
+{
+	u8 stat8;
+
+	stat8 = info->buf_ferr;
+
+	if (stat8 & 0x0f) {	/* Error, so process */
+		stat8 &= 0x0f;
+		membuf_error(stat8, error_found, handle_error);
+	}
+
+	stat8 = info->buf_nerr;
+
+	if (stat8 & 0x0f) {	/* Error, so process */
+		stat8 &= 0x0f;
+		membuf_error(stat8, error_found, handle_error);
+	}
+}
+
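+/*
+ * DRAM_FERR/DRAM_NERR bit usage as tested below; each mask checks the same
+ * flag in both bytes of the 16-bit register:
+ *   0x0101 correctable error (CE)   0x0808 CE threshold reached
+ *   0x0202 uncorrectable on read    0x2020 memory read retry
+ *   0x0404 uncorrectable on scrub   0x4040 uncorrectable, no info logged
+ */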
+static void e752x_check_dram(struct mem_ctl_info *mci,
+			struct e752x_error_info *info, int *error_found,
+			int handle_error)
+{
+	u16 error_one, error_next;
+
+	error_one = info->dram_ferr;
+	error_next = info->dram_nerr;
+
+	/* decode and report errors */
+	if (error_one & 0x0101)	/* check first error correctable */
+		process_ce(mci, error_one, info->dram_sec1_add,
+			info->dram_sec1_syndrome, error_found, handle_error);
+
+	if (error_next & 0x0101)	/* check next error correctable */
+		process_ce(mci, error_next, info->dram_sec2_add,
+			info->dram_sec2_syndrome, error_found, handle_error);
+
+	if (error_one & 0x4040)
+		process_ue_no_info_wr(mci, error_found, handle_error);
+
+	if (error_next & 0x4040)
+		process_ue_no_info_wr(mci, error_found, handle_error);
+
+	if (error_one & 0x2020)
+		process_ded_retry(mci, error_one, info->dram_retr_add,
+				error_found, handle_error);
+
+	if (error_next & 0x2020)
+		process_ded_retry(mci, error_next, info->dram_retr_add,
+				error_found, handle_error);
+
+	if (error_one & 0x0808)
+		process_threshold_ce(mci, error_one, error_found, handle_error);
+
+	if (error_next & 0x0808)
+		process_threshold_ce(mci, error_next, error_found,
+				handle_error);
+
+	if (error_one & 0x0606)
+		process_ue(mci, error_one, info->dram_ded_add,
+			info->dram_scrb_add, error_found, handle_error);
+
+	if (error_next & 0x0606)
+		process_ue(mci, error_next, info->dram_ded_add,
+			info->dram_scrb_add, error_found, handle_error);
+}
+
+static void e752x_get_error_info(struct mem_ctl_info *mci,
+				 struct e752x_error_info *info)
+{
+	struct pci_dev *dev;
+	struct e752x_pvt *pvt;
+
+	memset(info, 0, sizeof(*info));
+	pvt = (struct e752x_pvt *)mci->pvt_info;
+	dev = pvt->dev_d0f1;
+	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
+
+	if (info->ferr_global) {
+		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+			pci_read_config_dword(dev, I3100_NSI_FERR,
+					     &info->nsi_ferr);
+			info->hi_ferr = 0;
+		} else {
+			pci_read_config_byte(dev, E752X_HI_FERR,
+					     &info->hi_ferr);
+			info->nsi_ferr = 0;
+		}
+		pci_read_config_word(dev, E752X_SYSBUS_FERR,
+				&info->sysbus_ferr);
+		pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
+		pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
+		pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
+				&info->dram_sec1_add);
+		pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
+				&info->dram_sec1_syndrome);
+		pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
+				&info->dram_ded_add);
+		pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
+				&info->dram_scrb_add);
+		pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
+				&info->dram_retr_add);
+
+		/* ignore the reserved bits just in case */
+		if (info->hi_ferr & 0x7f)
+			pci_write_config_byte(dev, E752X_HI_FERR,
+					info->hi_ferr);
+
+		if (info->nsi_ferr & NSI_ERR_MASK)
+			pci_write_config_dword(dev, I3100_NSI_FERR,
+					info->nsi_ferr);
+
+		if (info->sysbus_ferr)
+			pci_write_config_word(dev, E752X_SYSBUS_FERR,
+					info->sysbus_ferr);
+
+		if (info->buf_ferr & 0x0f)
+			pci_write_config_byte(dev, E752X_BUF_FERR,
+					info->buf_ferr);
+
+		if (info->dram_ferr)
+			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
+					 info->dram_ferr, info->dram_ferr);
+
+		pci_write_config_dword(dev, E752X_FERR_GLOBAL,
+				info->ferr_global);
+	}
+
+	pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
+
+	if (info->nerr_global) {
+		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+			pci_read_config_dword(dev, I3100_NSI_NERR,
+					     &info->nsi_nerr);
+			info->hi_nerr = 0;
+		} else {
+			pci_read_config_byte(dev, E752X_HI_NERR,
+					     &info->hi_nerr);
+			info->nsi_nerr = 0;
+		}
+		pci_read_config_word(dev, E752X_SYSBUS_NERR,
+				&info->sysbus_nerr);
+		pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
+		pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
+		pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
+				&info->dram_sec2_add);
+		pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
+				&info->dram_sec2_syndrome);
+
+		if (info->hi_nerr & 0x7f)
+			pci_write_config_byte(dev, E752X_HI_NERR,
+					info->hi_nerr);
+
+		if (info->nsi_nerr & NSI_ERR_MASK)
+			pci_write_config_dword(dev, I3100_NSI_NERR,
+					info->nsi_nerr);
+
+		if (info->sysbus_nerr)
+			pci_write_config_word(dev, E752X_SYSBUS_NERR,
+					info->sysbus_nerr);
+
+		if (info->buf_nerr & 0x0f)
+			pci_write_config_byte(dev, E752X_BUF_NERR,
+					info->buf_nerr);
+
+		if (info->dram_nerr)
+			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
+					 info->dram_nerr, info->dram_nerr);
+
+		pci_write_config_dword(dev, E752X_NERR_GLOBAL,
+				info->nerr_global);
+	}
+}
+
+static int e752x_process_error_info(struct mem_ctl_info *mci,
+				struct e752x_error_info *info,
+				int handle_errors)
+{
+	u32 error32, stat32;
+	int error_found;
+
+	error_found = 0;
+	error32 = (info->ferr_global >> 18) & 0x3ff;
+	stat32 = (info->ferr_global >> 4) & 0x7ff;
+
+	if (error32)
+		global_error(1, error32, &error_found, handle_errors);
+
+	if (stat32)
+		global_error(0, stat32, &error_found, handle_errors);
+
+	error32 = (info->nerr_global >> 18) & 0x3ff;
+	stat32 = (info->nerr_global >> 4) & 0x7ff;
+
+	if (error32)
+		global_error(1, error32, &error_found, handle_errors);
+
+	if (stat32)
+		global_error(0, stat32, &error_found, handle_errors);
+
+	e752x_check_hub_interface(info, &error_found, handle_errors);
+	e752x_check_ns_interface(info, &error_found, handle_errors);
+	e752x_check_sysbus(info, &error_found, handle_errors);
+	e752x_check_membuf(info, &error_found, handle_errors);
+	e752x_check_dram(mci, info, &error_found, handle_errors);
+	return error_found;
+}
+
+static void e752x_check(struct mem_ctl_info *mci)
+{
+	struct e752x_error_info info;
+
+	debugf3("%s()\n", __func__);
+	e752x_get_error_info(mci, &info);
+	e752x_process_error_info(mci, &info, 1);
+}
+
+/* Return 1 if dual channel mode is active.  Else return 0. */
+static inline int dual_channel_active(u16 ddrcsr)
+{
+	return (((ddrcsr >> 12) & 3) == 3);
+}
+
+/* Remap csrow index numbers if map_type is "reverse"
+ */
+static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
+{
+	struct e752x_pvt *pvt = mci->pvt_info;
+
+	if (!pvt->map_type)
+		return (7 - index);
+
+	return (index);
+}
+
+static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
+			u16 ddrcsr)
+{
+	struct csrow_info *csrow;
+	unsigned long last_cumul_size;
+	int index, mem_dev, drc_chan;
+	int drc_drbg;		/* DRB granularity: 1=64mb, 2=128mb */
+	int drc_ddim;		/* DRAM Data Integrity Mode 0=none, 2=edac */
+	u8 value;
+	u32 dra, drc, cumul_size;
+
+	dra = 0;
+	for (index = 0; index < 4; index++) {
+		u8 dra_reg;
+		pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
+		dra |= dra_reg << (index * 8);
+	}
+	pci_read_config_dword(pdev, E752X_DRC, &drc);
+	drc_chan = dual_channel_active(ddrcsr);
+	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
+	drc_ddim = (drc >> 20) & 0x3;
+
+	/* The dram row boundary (DRB) reg values are boundary address for
+	 * each DRAM row with a granularity of 64 or 128MB (single/dual
+	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
+	 * contain the total memory contained in all eight rows.
+	 */
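+	/*
+	 * Illustrative example: in dual-channel mode (drc_drbg == 2) a DRB
+	 * value of 4 marks a boundary at 4 * 128 MiB, so with
+	 * PAGE_SHIFT == 12 cumul_size = 4 << (27 - 12) = 0x20000 pages
+	 * (512 MiB).
+	 */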
+	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+		/* mem_dev 0=x8, 1=x4 */
+		mem_dev = (dra >> (index * 4 + 2)) & 0x3;
+		csrow = &mci->csrows[remap_csrow_index(mci, index)];
+
+		mem_dev = (mem_dev == 2);
+		pci_read_config_byte(pdev, E752X_DRB + index, &value);
+		/* convert a 128 or 64 MiB DRB to a page size. */
+		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
+		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+			cumul_size);
+		if (cumul_size == last_cumul_size)
+			continue;	/* not populated */
+
+		csrow->first_page = last_cumul_size;
+		csrow->last_page = cumul_size - 1;
+		csrow->nr_pages = cumul_size - last_cumul_size;
+		last_cumul_size = cumul_size;
+		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
+		csrow->mtype = MEM_RDDR;	/* only one type supported */
+		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+		/*
+		 * if single channel or x8 devices then SECDED
+		 * if dual channel and x4 then S4ECD4ED
+		 */
+		if (drc_ddim) {
+			if (drc_chan && mem_dev) {
+				csrow->edac_mode = EDAC_S4ECD4ED;
+				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+			} else {
+				csrow->edac_mode = EDAC_SECDED;
+				mci->edac_cap |= EDAC_FLAG_SECDED;
+			}
+		} else
+			csrow->edac_mode = EDAC_NONE;
+	}
+}
+
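+/*
+ * Illustrative mapping: with DRB = { 2, 4, 4, 4, 4, 4, 4, 4 } (one
+ * double-sided DIMM behind rows 0/1), e752x_init_mem_map_table() below
+ * produces map = { 0, 1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; a
+ * single-sided DIMM (DRB = { 2, 2, ... }) would leave map[1] = 0xff.
+ */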
+static void e752x_init_mem_map_table(struct pci_dev *pdev,
+				struct e752x_pvt *pvt)
+{
+	int index;
+	u8 value, last, row;
+
+	last = 0;
+	row = 0;
+
+	for (index = 0; index < 8; index += 2) {
+		pci_read_config_byte(pdev, E752X_DRB + index, &value);
+		/* test if there is a dimm in this slot */
+		if (value == last) {
+			/* no dimm in the slot, so flag it as empty */
+			pvt->map[index] = 0xff;
+			pvt->map[index + 1] = 0xff;
+		} else {	/* there is a dimm in the slot */
+			pvt->map[index] = row;
+			row++;
+			last = value;
+			/* test the next value to see if the dimm is double
+			 * sided
+			 */
+			pci_read_config_byte(pdev, E752X_DRB + index + 1,
+					&value);
+
+			/* if the next value matches, the dimm is single
+			 * sided and the second map entry stays empty;
+			 * otherwise it is double sided, so save the next
+			 * row number
+			 */
+			pvt->map[index + 1] = (value == last) ? 0xff : row;
+			row++;
+			last = value;
+		}
+	}
+}
+
+/* Return 0 on success or 1 on failure. */
+static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
+			struct e752x_pvt *pvt)
+{
+	struct pci_dev *dev;
+
+	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+				pvt->dev_info->err_dev, pvt->bridge_ck);
+
+	if (pvt->bridge_ck == NULL)
+		pvt->bridge_ck = pci_scan_single_device(pdev->bus,
+							PCI_DEVFN(0, 1));
+
+	if (pvt->bridge_ck == NULL) {
+		e752x_printk(KERN_ERR, "error reporting device not found: "
+			"vendor %x device 0x%x (broken BIOS?)\n",
+			PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
+		return 1;
+	}
+
+	dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				e752x_devs[dev_idx].ctl_dev,
+				NULL);
+
+	if (dev == NULL)
+		goto fail;
+
+	pvt->dev_d0f0 = dev;
+	pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
+
+	return 0;
+
+fail:
+	pci_dev_put(pvt->bridge_ck);
+	return 1;
+}
+
+/* Setup system bus parity mask register.
+ * Sysbus parity supported on:
+ *   e7320/e7520/e7525 + Xeon
+ *   i3100 + Xeon/Celeron
+ * Sysbus parity not supported on:
+ *   i3100 + Pentium M/Celeron M/Core Duo/Core2 Duo
+ */
+static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
+{
+	char *cpu_id = cpu_data(0).x86_model_id;
+	struct pci_dev *dev = pvt->dev_d0f1;
+	int enable = 1;
+
+	/* Allow module parameter override, else see if CPU supports parity */
+	if (sysbus_parity != -1) {
+		enable = sysbus_parity;
+	} else if (cpu_id[0] &&
+		   ((strstr(cpu_id, "Pentium") && strstr(cpu_id, " M ")) ||
+		    (strstr(cpu_id, "Celeron") && strstr(cpu_id, " M ")) ||
+		    (strstr(cpu_id, "Core") && strstr(cpu_id, "Duo")))) {
+		e752x_printk(KERN_INFO, "System Bus Parity not "
+			     "supported by CPU, disabling\n");
+		enable = 0;
+	}
+
+	if (enable)
+		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
+	else
+		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
+}
+
+static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
+{
+	struct pci_dev *dev;
+
+	dev = pvt->dev_d0f1;
+	/* Turn off error disable & SMI in case the BIOS turned it on */
+	if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+		pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
+		pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
+	} else {
+		pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
+		pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
+	}
+
+	e752x_init_sysbus_parity_mask(pvt);
+
+	pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
+	pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
+	pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
+	pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
+	pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
+}
+
+static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	u16 pci_data;
+	u8 stat8;
+	struct mem_ctl_info *mci;
+	struct e752x_pvt *pvt;
+	u16 ddrcsr;
+	int drc_chan;		/* Number of channels 0=1chan,1=2chan */
+	struct e752x_error_info discard;
+
+	debugf0("%s(): mci\n", __func__);
+	debugf0("Starting Probe1\n");
+
+	/* check to see if device 0 function 1 is enabled; if it isn't, we
+	 * assume the BIOS has reserved it for a reason and is expecting
+	 * exclusive access; we take care not to violate that assumption
+	 * and fail the probe. */
+	pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
+	if (!force_function_unhide && !(stat8 & (1 << 5))) {
+		printk(KERN_INFO "Contact your BIOS vendor to see if the "
+			"E752x error registers can be safely un-hidden\n");
+		return -ENODEV;
+	}
+	stat8 |= (1 << 5);
+	pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
+
+	pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
+	/* FIXME: should check >>12 or 0xf, true for all? */
+	/* Dual channel = 1, Single channel = 0 */
+	drc_chan = dual_channel_active(ddrcsr);
+
+	mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
+
+	if (mci == NULL)
+		return -ENOMEM;
+
+	debugf3("%s(): init mci\n", __func__);
+	mci->mtype_cap = MEM_FLAG_RDDR;
+	/* 3100 IMCH supports SECDED only */
+	mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
+		(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
+	/* FIXME - what if different memory types are in different csrows? */
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = E752X_REVISION;
+	mci->dev = &pdev->dev;
+
+	debugf3("%s(): init pvt\n", __func__);
+	pvt = (struct e752x_pvt *)mci->pvt_info;
+	pvt->dev_info = &e752x_devs[dev_idx];
+	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
+
+	if (e752x_get_devs(pdev, dev_idx, pvt)) {
+		edac_mc_free(mci);
+		return -ENODEV;
+	}
+
+	debugf3("%s(): more mci init\n", __func__);
+	mci->ctl_name = pvt->dev_info->ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = e752x_check;
+	mci->ctl_page_to_phys = ctl_page_to_phys;
+
+	/* set the map type.  1 = normal, 0 = reversed
+	 * Must be set before e752x_init_csrows in case csrow mapping
+	 * is reversed.
+	 */
+	pci_read_config_byte(pdev, E752X_DRM, &stat8);
+	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
+
+	e752x_init_csrows(mci, pdev, ddrcsr);
+	e752x_init_mem_map_table(pdev, pvt);
+
+	if (dev_idx == I3100)
+		mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
+	else
+		mci->edac_cap |= EDAC_FLAG_NONE;
+	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
+
+	/* load the top of low memory, remap base, and remap limit vars */
+	pci_read_config_word(pdev, E752X_TOLM, &pci_data);
+	pvt->tolm = ((u32) pci_data) << 4;
+	pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
+	pvt->remapbase = ((u32) pci_data) << 14;
+	pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
+	pvt->remaplimit = ((u32) pci_data) << 14;
+	e752x_printk(KERN_INFO,
+			"tolm = %x, remapbase = %x, remaplimit = %x\n",
+			pvt->tolm, pvt->remapbase, pvt->remaplimit);
+
+	/* Here we assume that we will never see multiple instances of this
+	 * type of memory controller.  The ID is therefore hardcoded to 0.
+	 */
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail;
+	}
+
+	e752x_init_error_reporting_regs(pvt);
+	e752x_get_error_info(mci, &discard);	/* clear other MCH errors */
+
+	/* allocating generic PCI control info */
+	e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!e752x_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n", __func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+	return 0;
+
+fail:
+	pci_dev_put(pvt->dev_d0f0);
+	pci_dev_put(pvt->dev_d0f1);
+	pci_dev_put(pvt->bridge_ck);
+	edac_mc_free(mci);
+
+	return -ENODEV;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit e752x_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	debugf0("%s()\n", __func__);
+
+	/* wake up and enable device */
+	if (pci_enable_device(pdev) < 0)
+		return -EIO;
+
+	return e752x_probe1(pdev, ent->driver_data);
+}
+
+static void __devexit e752x_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+	struct e752x_pvt *pvt;
+
+	debugf0("%s()\n", __func__);
+
+	if (e752x_pci)
+		edac_pci_release_generic_ctl(e752x_pci);
+
+	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+		return;
+
+	pvt = (struct e752x_pvt *)mci->pvt_info;
+	pci_dev_put(pvt->dev_d0f0);
+	pci_dev_put(pvt->dev_d0f1);
+	pci_dev_put(pvt->bridge_ck);
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
+	{
+	 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7520},
+	{
+	 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7525},
+	{
+	 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7320},
+	{
+	 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 I3100},
+	{
+	 0,
+	 }			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
+
+static struct pci_driver e752x_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = e752x_init_one,
+	.remove = __devexit_p(e752x_remove_one),
+	.id_table = e752x_pci_tbl,
+};
+
+static int __init e752x_init(void)
+{
+	int pci_rc;
+
+	debugf3("%s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	pci_rc = pci_register_driver(&e752x_driver);
+	return (pci_rc < 0) ? pci_rc : 0;
+}
+
+static void __exit e752x_exit(void)
+{
+	debugf3("%s()\n", __func__);
+	pci_unregister_driver(&e752x_driver);
+}
+
+module_init(e752x_init);
+module_exit(e752x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
+MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
+
+module_param(force_function_unhide, int, 0444);
+MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
+		 " 1=force unhide and hope BIOS doesn't fight driver for "
+		"Dev0:Fun1 access");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+module_param(sysbus_parity, int, 0444);
+MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
+		" 1=enable system bus parity checking, default=auto-detect");
+module_param(report_non_memory_errors, int, 0644);
+MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
+		"reporting, 1=enable non-memory error reporting");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
new file mode 100644
index 0000000..c7d11cc
--- /dev/null
+++ b/drivers/edac/e7xxx_edac.c
@@ -0,0 +1,577 @@
+/*
+ * Intel e7xxx Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * See "enum e7xxx_chips" below for supported chipsets
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *	http://www.anime.net/~goemon/linux-ecc/
+ *
+ * Contributors:
+ *	Eric Biederman (Linux Networx)
+ *	Tom Zimmerman (Linux Networx)
+ *	Jim Garlick (Lawrence Livermore National Labs)
+ *	Dave Peterson (Lawrence Livermore National Labs)
+ *	That One Guy (Some other place)
+ *	Wang Zhenyu (intel.com)
+ *
+ * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define	E7XXX_REVISION " Ver: 2.0.2 " __DATE__
+#define	EDAC_MOD_STR	"e7xxx_edac"
+
+#define e7xxx_printk(level, fmt, arg...) \
+	edac_printk(level, "e7xxx", fmt, ##arg)
+
+#define e7xxx_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_7205_0
+#define PCI_DEVICE_ID_INTEL_7205_0	0x255d
+#endif				/* PCI_DEVICE_ID_INTEL_7205_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
+#define PCI_DEVICE_ID_INTEL_7205_1_ERR	0x2551
+#endif				/* PCI_DEVICE_ID_INTEL_7205_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7500_0
+#define PCI_DEVICE_ID_INTEL_7500_0	0x2540
+#endif				/* PCI_DEVICE_ID_INTEL_7500_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
+#define PCI_DEVICE_ID_INTEL_7500_1_ERR	0x2541
+#endif				/* PCI_DEVICE_ID_INTEL_7500_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7501_0
+#define PCI_DEVICE_ID_INTEL_7501_0	0x254c
+#endif				/* PCI_DEVICE_ID_INTEL_7501_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
+#define PCI_DEVICE_ID_INTEL_7501_1_ERR	0x2541
+#endif				/* PCI_DEVICE_ID_INTEL_7501_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7505_0
+#define PCI_DEVICE_ID_INTEL_7505_0	0x2550
+#endif				/* PCI_DEVICE_ID_INTEL_7505_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
+#define PCI_DEVICE_ID_INTEL_7505_1_ERR	0x2551
+#endif				/* PCI_DEVICE_ID_INTEL_7505_1_ERR */
+
+#define E7XXX_NR_CSROWS		8	/* number of csrows */
+#define E7XXX_NR_DIMMS		8	/* FIXME - is this correct? */
+
+/* E7XXX register addresses - device 0 function 0 */
+#define E7XXX_DRB		0x60	/* DRAM row boundary register (8b) */
+#define E7XXX_DRA		0x70	/* DRAM row attribute register (8b) */
+					/*
+					 * 31   Device width row 7 0=x8 1=x4
+					 * 27   Device width row 6
+					 * 23   Device width row 5
+					 * 19   Device width row 4
+					 * 15   Device width row 3
+					 * 11   Device width row 2
+					 *  7   Device width row 1
+					 *  3   Device width row 0
+					 */
+#define E7XXX_DRC		0x7C	/* DRAM controller mode reg (32b) */
+					/*
+					 * 22    Number channels 0=1,1=2
+					 * 19:18 DRB Granularity 32/64MB
+					 */
+#define E7XXX_TOLM		0xC4	/* DRAM top of low memory reg (16b) */
+#define E7XXX_REMAPBASE		0xC6	/* DRAM remap base address reg (16b) */
+#define E7XXX_REMAPLIMIT	0xC8	/* DRAM remap limit address reg (16b) */
+
+/* E7XXX register addresses - device 0 function 1 */
+#define E7XXX_DRAM_FERR		0x80	/* DRAM first error register (8b) */
+#define E7XXX_DRAM_NERR		0x82	/* DRAM next error register (8b) */
+#define E7XXX_DRAM_CELOG_ADD	0xA0	/* DRAM first correctable memory */
+					/*     error address register (32b) */
+					/*
+					 * 31:28 Reserved
+					 * 27:6  CE address (4k block 33:12)
+					 *  5:0  Reserved
+					 */
+#define E7XXX_DRAM_UELOG_ADD	0xB0	/* DRAM first uncorrectable memory */
+					/*     error address register (32b) */
+					/*
+					 * 31:28 Reserved
+					 * 27:6  UE address (4k block 33:12)
+					 *  5:0  Reserved
+					 */
+#define E7XXX_DRAM_CELOG_SYNDROME 0xD0	/* DRAM first correctable memory */
+					/*     error syndrome register (16b) */
+
+enum e7xxx_chips {
+	E7500 = 0,
+	E7501,
+	E7505,
+	E7205,
+};
+
+struct e7xxx_pvt {
+	struct pci_dev *bridge_ck;
+	u32 tolm;
+	u32 remapbase;
+	u32 remaplimit;
+	const struct e7xxx_dev_info *dev_info;
+};
+
+struct e7xxx_dev_info {
+	u16 err_dev;
+	const char *ctl_name;
+};
+
+struct e7xxx_error_info {
+	u8 dram_ferr;
+	u8 dram_nerr;
+	u32 dram_celog_add;
+	u16 dram_celog_syndrome;
+	u32 dram_uelog_add;
+};
+
+static struct edac_pci_ctl_info *e7xxx_pci;
+
+static const struct e7xxx_dev_info e7xxx_devs[] = {
+	[E7500] = {
+		.err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
+		.ctl_name = "E7500"},
+	[E7501] = {
+		.err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
+		.ctl_name = "E7501"},
+	[E7505] = {
+		.err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
+		.ctl_name = "E7505"},
+	[E7205] = {
+		.err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
+		.ctl_name = "E7205"},
+};
+
+/* FIXME - is this valid for both SECDED and S4ECD4ED? */
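+/*
+ * Example: a syndrome of 0x0012 (high byte clear) selects channel 0 and
+ * 0x3400 (low byte clear) selects channel 1; when both bytes are
+ * non-zero, the per-nibble test below decides.
+ */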
+static inline int e7xxx_find_channel(u16 syndrome)
+{
+	debugf3("%s()\n", __func__);
+
+	if ((syndrome & 0xff00) == 0)
+		return 0;
+
+	if ((syndrome & 0x00ff) == 0)
+		return 1;
+
+	if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
+		return 0;
+
+	return 1;
+}
+
+static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
+				unsigned long page)
+{
+	u32 remap;
+	struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
+
+	debugf3("%s()\n", __func__);
+
+	if ((page < pvt->tolm) ||
+		((page >= 0x100000) && (page < pvt->remapbase)))
+		return page;
+
+	remap = (page - pvt->tolm) + pvt->remapbase;
+
+	if (remap < pvt->remaplimit)
+		return remap;
+
+	e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
+	return pvt->tolm - 1;
+}
+
+static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+{
+	u32 error_1b, page;
+	u16 syndrome;
+	int row;
+	int channel;
+
+	debugf3("%s()\n", __func__);
+	/* read the error address */
+	error_1b = info->dram_celog_add;
+	/* FIXME - should use PAGE_SHIFT */
+	page = error_1b >> 6;	/* convert the address to 4k page */
+	/* read the syndrome */
+	syndrome = info->dram_celog_syndrome;
+	/* FIXME - check for -1 */
+	row = edac_mc_find_csrow_by_page(mci, page);
+	/* convert syndrome to channel */
+	channel = e7xxx_find_channel(syndrome);
+	edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
+}
+
+static void process_ce_no_info(struct mem_ctl_info *mci)
+{
+	debugf3("%s()\n", __func__);
+	edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+}
+
+static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+{
+	u32 error_2b, block_page;
+	int row;
+
+	debugf3("%s()\n", __func__);
+	/* read the error address */
+	error_2b = info->dram_uelog_add;
+	/* FIXME - should use PAGE_SHIFT */
+	block_page = error_2b >> 6;	/* convert to 4k address */
+	row = edac_mc_find_csrow_by_page(mci, block_page);
+	edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+}
+
+static void process_ue_no_info(struct mem_ctl_info *mci)
+{
+	debugf3("%s()\n", __func__);
+	edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+}
+
+static void e7xxx_get_error_info(struct mem_ctl_info *mci,
+				 struct e7xxx_error_info *info)
+{
+	struct e7xxx_pvt *pvt;
+
+	pvt = (struct e7xxx_pvt *)mci->pvt_info;
+	pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr);
+	pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr);
+
+	if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
+		pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
+				&info->dram_celog_add);
+		pci_read_config_word(pvt->bridge_ck,
+				E7XXX_DRAM_CELOG_SYNDROME,
+				&info->dram_celog_syndrome);
+	}
+
+	if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
+		pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
+				&info->dram_uelog_add);
+
+	if (info->dram_ferr & 3)
+		pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
+
+	if (info->dram_nerr & 3)
+		pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
+}
+
+static int e7xxx_process_error_info(struct mem_ctl_info *mci,
+				struct e7xxx_error_info *info,
+				int handle_errors)
+{
+	int error_found;
+
+	error_found = 0;
+
+	/* decode and report errors */
+	if (info->dram_ferr & 1) {	/* check first error correctable */
+		error_found = 1;
+
+		if (handle_errors)
+			process_ce(mci, info);
+	}
+
+	if (info->dram_ferr & 2) {	/* check first error uncorrectable */
+		error_found = 1;
+
+		if (handle_errors)
+			process_ue(mci, info);
+	}
+
+	if (info->dram_nerr & 1) {	/* check next error correctable */
+		error_found = 1;
+
+		if (handle_errors) {
+			if (info->dram_ferr & 1)
+				process_ce_no_info(mci);
+			else
+				process_ce(mci, info);
+		}
+	}
+
+	if (info->dram_nerr & 2) {	/* check next error uncorrectable */
+		error_found = 1;
+
+		if (handle_errors) {
+			if (info->dram_ferr & 2)
+				process_ue_no_info(mci);
+			else
+				process_ue(mci, info);
+		}
+	}
+
+	return error_found;
+}
+
+static void e7xxx_check(struct mem_ctl_info *mci)
+{
+	struct e7xxx_error_info info;
+
+	debugf3("%s()\n", __func__);
+	e7xxx_get_error_info(mci, &info);
+	e7xxx_process_error_info(mci, &info, 1);
+}
+
+/* Return 1 if dual channel mode is active.  Else return 0. */
+static inline int dual_channel_active(u32 drc, int dev_idx)
+{
+	return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
+}
+
+/* Return DRB granularity (0=32mb, 1=64mb). */
+static inline int drb_granularity(u32 drc, int dev_idx)
+{
+	/* only e7501 can be single channel */
+	return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
+}
+
+static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
+			int dev_idx, u32 drc)
+{
+	unsigned long last_cumul_size;
+	int index;
+	u8 value;
+	u32 dra, cumul_size;
+	int drc_chan, drc_drbg, drc_ddim, mem_dev;
+	struct csrow_info *csrow;
+
+	pci_read_config_dword(pdev, E7XXX_DRA, &dra);
+	drc_chan = dual_channel_active(drc, dev_idx);
+	drc_drbg = drb_granularity(drc, dev_idx);
+	drc_ddim = (drc >> 20) & 0x3;
+	last_cumul_size = 0;
+
+	/* The dram row boundary (DRB) reg values are boundary address
+	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
+	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
+	 * contain the total memory contained in all eight rows.
+	 */
+	for (index = 0; index < mci->nr_csrows; index++) {
+		/* mem_dev 0=x8, 1=x4 */
+		mem_dev = (dra >> (index * 4 + 3)) & 0x1;
+		csrow = &mci->csrows[index];
+
+		pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
+		/* convert a 64 or 32 MiB DRB to a page size. */
+		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
+		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+			cumul_size);
+		if (cumul_size == last_cumul_size)
+			continue;	/* not populated */
+
+		csrow->first_page = last_cumul_size;
+		csrow->last_page = cumul_size - 1;
+		csrow->nr_pages = cumul_size - last_cumul_size;
+		last_cumul_size = cumul_size;
+		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
+		csrow->mtype = MEM_RDDR;	/* only one type supported */
+		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+		/*
+		 * if single channel or x8 devices then SECDED
+		 * if dual channel and x4 then S4ECD4ED
+		 */
+		if (drc_ddim) {
+			if (drc_chan && mem_dev) {
+				csrow->edac_mode = EDAC_S4ECD4ED;
+				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+			} else {
+				csrow->edac_mode = EDAC_SECDED;
+				mci->edac_cap |= EDAC_FLAG_SECDED;
+			}
+		} else
+			csrow->edac_mode = EDAC_NONE;
+	}
+}
+
+static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	u16 pci_data;
+	struct mem_ctl_info *mci = NULL;
+	struct e7xxx_pvt *pvt = NULL;
+	u32 drc;
+	int drc_chan;
+	struct e7xxx_error_info discard;
+
+	debugf0("%s(): mci\n", __func__);
+
+	pci_read_config_dword(pdev, E7XXX_DRC, &drc);
+
+	drc_chan = dual_channel_active(drc, dev_idx);
+	mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
+
+	if (mci == NULL)
+		return -ENOMEM;
+
+	debugf3("%s(): init mci\n", __func__);
+	mci->mtype_cap = MEM_FLAG_RDDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
+		EDAC_FLAG_S4ECD4ED;
+	/* FIXME - what if different memory types are in different csrows? */
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = E7XXX_REVISION;
+	mci->dev = &pdev->dev;
+	debugf3("%s(): init pvt\n", __func__);
+	pvt = (struct e7xxx_pvt *)mci->pvt_info;
+	pvt->dev_info = &e7xxx_devs[dev_idx];
+	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+					pvt->dev_info->err_dev, pvt->bridge_ck);
+
+	if (!pvt->bridge_ck) {
+		e7xxx_printk(KERN_ERR, "error reporting device not found: "
+			"vendor %x device 0x%x (broken BIOS?)\n",
+			PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
+		goto fail0;
+	}
+
+	debugf3("%s(): more mci init\n", __func__);
+	mci->ctl_name = pvt->dev_info->ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = e7xxx_check;
+	mci->ctl_page_to_phys = ctl_page_to_phys;
+	e7xxx_init_csrows(mci, pdev, dev_idx, drc);
+	mci->edac_cap |= EDAC_FLAG_NONE;
+	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
+	/* load the top of low memory, remap base, and remap limit vars */
+	pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
+	pvt->tolm = ((u32) pci_data) << 4;
+	pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data);
+	pvt->remapbase = ((u32) pci_data) << 14;
+	pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data);
+	pvt->remaplimit = ((u32) pci_data) << 14;
+	e7xxx_printk(KERN_INFO,
+		"tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
+		pvt->remapbase, pvt->remaplimit);
+
+	/* clear any pending errors, or initial state bits */
+	e7xxx_get_error_info(mci, &discard);
+
+	/* Here we assume that we will never see multiple instances of this
+	 * type of memory controller.  The ID is therefore hardcoded to 0.
+	 */
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail1;
+	}
+
+	/* allocating generic PCI control info */
+	e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!e7xxx_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+	return 0;
+
+fail1:
+	pci_dev_put(pvt->bridge_ck);
+
+fail0:
+	edac_mc_free(mci);
+
+	return -ENODEV;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit e7xxx_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	debugf0("%s()\n", __func__);
+
+	/* wake up and enable device */
+	return pci_enable_device(pdev) ?
+		-EIO : e7xxx_probe1(pdev, ent->driver_data);
+}
+
+static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+	struct e7xxx_pvt *pvt;
+
+	debugf0("%s()\n", __func__);
+
+	if (e7xxx_pci)
+		edac_pci_release_generic_ctl(e7xxx_pci);
+
+	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+		return;
+
+	pvt = (struct e7xxx_pvt *)mci->pvt_info;
+	pci_dev_put(pvt->bridge_ck);
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
+	{
+	 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7205},
+	{
+	 PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7500},
+	{
+	 PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7501},
+	{
+	 PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7505},
+	{
+	 0,
+	 }			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
+
+static struct pci_driver e7xxx_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = e7xxx_init_one,
+	.remove = __devexit_p(e7xxx_remove_one),
+	.id_table = e7xxx_pci_tbl,
+};
+
+static int __init e7xxx_init(void)
+{
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	return pci_register_driver(&e7xxx_driver);
+}
+
+static void __exit e7xxx_exit(void)
+{
+	pci_unregister_driver(&e7xxx_driver);
+}
+
+module_init(e7xxx_init);
+module_exit(e7xxx_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
+		"Based on work by Dan Hollis et al");
+MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
new file mode 100644
index 0000000..12f355c
--- /dev/null
+++ b/drivers/edac/edac_core.h
@@ -0,0 +1,878 @@
+/*
+ * Defines, structures, APIs for edac_core module
+ *
+ * (C) 2007 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *	http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ *     Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * Refactored for multi-source files:
+ *	Doug Thompson <norsk5@xmission.com>
+ *
+ */
+
+#ifndef _EDAC_CORE_H_
+#define _EDAC_CORE_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/nmi.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/sysdev.h>
+#include <linux/workqueue.h>
+
+#define EDAC_MC_LABEL_LEN	31
+#define EDAC_DEVICE_NAME_LEN	31
+#define EDAC_ATTRIB_VALUE_LEN	15
+#define MC_PROC_NAME_MAX_LEN	7
+
+#if PAGE_SHIFT < 20
+#define PAGES_TO_MiB( pages )	( ( pages ) >> ( 20 - PAGE_SHIFT ) )
+#else				/* PAGE_SHIFT >= 20 */
+#define PAGES_TO_MiB( pages )	( ( pages ) << ( PAGE_SHIFT - 20 ) )
+#endif
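+
+/*
+ * e.g. with the common PAGE_SHIFT of 12, PAGES_TO_MiB(pages) is
+ * (pages >> 8), so 65536 4 KiB pages report as 256 MiB.
+ */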
+
+#define edac_printk(level, prefix, fmt, arg...) \
+	printk(level "EDAC " prefix ": " fmt, ##arg)
+
+#define edac_printk_verbose(level, prefix, fmt, arg...) \
+	printk(level "EDAC " prefix ": " "in %s, line at %d: " fmt,	\
+	       __FILE__, __LINE__, ##arg)
+
+#define edac_mc_printk(mci, level, fmt, arg...) \
+	printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
+
+#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
+	printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
+
+/* edac_device printk */
+#define edac_device_printk(ctl, level, fmt, arg...) \
+	printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
+
+/* edac_pci printk */
+#define edac_pci_printk(ctl, level, fmt, arg...) \
+	printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
+
+/* prefixes for edac_printk() and edac_mc_printk() */
+#define EDAC_MC "MC"
+#define EDAC_PCI "PCI"
+#define EDAC_DEBUG "DEBUG"
+
+#ifdef CONFIG_EDAC_DEBUG
+extern int edac_debug_level;
+
+#ifndef CONFIG_EDAC_DEBUG_VERBOSE
+#define edac_debug_printk(level, fmt, arg...)                           \
+	do {                                                            \
+		if (level <= edac_debug_level)                          \
+			edac_printk(KERN_DEBUG, EDAC_DEBUG,		\
+				    "%s: " fmt, __func__, ##arg);	\
+	} while (0)
+#else  /* CONFIG_EDAC_DEBUG_VERBOSE */
+#define edac_debug_printk(level, fmt, arg...)                            \
+	do {                                                             \
+		if (level <= edac_debug_level)                           \
+			edac_printk_verbose(KERN_DEBUG, EDAC_DEBUG, fmt, \
+					    ##arg);			\
+	} while (0)
+#endif
+
+#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
+#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
+#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
+#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
+#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
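+
+/*
+ * Usage example: debugf3("%s()\n", __func__) prints only when the module
+ * was loaded with debug_level >= 3 (edac_debug_level); without
+ * CONFIG_EDAC_DEBUG the debugfN() calls compile away entirely (see below).
+ */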
+
+#else				/* !CONFIG_EDAC_DEBUG */
+
+#define debugf0( ... )
+#define debugf1( ... )
+#define debugf2( ... )
+#define debugf3( ... )
+#define debugf4( ... )
+
+#endif				/* !CONFIG_EDAC_DEBUG */
+
+#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
+	PCI_DEVICE_ID_ ## vend ## _ ## dev
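+
+/*
+ * e.g. PCI_VEND_DEV(INTEL, 7520_0) expands to
+ * "PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7520_0", as used in the
+ * pci_device_id tables of the chipset drivers.
+ */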
+
+#define edac_dev_name(dev) (dev)->dev_name
+
+/* memory devices */
+enum dev_type {
+	DEV_UNKNOWN = 0,
+	DEV_X1,
+	DEV_X2,
+	DEV_X4,
+	DEV_X8,
+	DEV_X16,
+	DEV_X32,		/* Do these parts exist? */
+	DEV_X64			/* Do these parts exist? */
+};
+
+#define DEV_FLAG_UNKNOWN	BIT(DEV_UNKNOWN)
+#define DEV_FLAG_X1		BIT(DEV_X1)
+#define DEV_FLAG_X2		BIT(DEV_X2)
+#define DEV_FLAG_X4		BIT(DEV_X4)
+#define DEV_FLAG_X8		BIT(DEV_X8)
+#define DEV_FLAG_X16		BIT(DEV_X16)
+#define DEV_FLAG_X32		BIT(DEV_X32)
+#define DEV_FLAG_X64		BIT(DEV_X64)
+
+/* memory types */
+enum mem_type {
+	MEM_EMPTY = 0,		/* Empty csrow */
+	MEM_RESERVED,		/* Reserved csrow type */
+	MEM_UNKNOWN,		/* Unknown csrow type */
+	MEM_FPM,		/* Fast page mode */
+	MEM_EDO,		/* Extended data out */
+	MEM_BEDO,		/* Burst Extended data out */
+	MEM_SDR,		/* Single data rate SDRAM */
+	MEM_RDR,		/* Registered single data rate SDRAM */
+	MEM_DDR,		/* Double data rate SDRAM */
+	MEM_RDDR,		/* Registered Double data rate SDRAM */
+	MEM_RMBS,		/* Rambus DRAM */
+	MEM_DDR2,		/* DDR2 RAM */
+	MEM_FB_DDR2,		/* fully buffered DDR2 */
+	MEM_RDDR2,		/* Registered DDR2 RAM */
+	MEM_XDR,		/* Rambus XDR */
+	MEM_DDR3,		/* DDR3 RAM */
+	MEM_RDDR3,		/* Registered DDR3 RAM */
+};
+
+#define MEM_FLAG_EMPTY		BIT(MEM_EMPTY)
+#define MEM_FLAG_RESERVED	BIT(MEM_RESERVED)
+#define MEM_FLAG_UNKNOWN	BIT(MEM_UNKNOWN)
+#define MEM_FLAG_FPM		BIT(MEM_FPM)
+#define MEM_FLAG_EDO		BIT(MEM_EDO)
+#define MEM_FLAG_BEDO		BIT(MEM_BEDO)
+#define MEM_FLAG_SDR		BIT(MEM_SDR)
+#define MEM_FLAG_RDR		BIT(MEM_RDR)
+#define MEM_FLAG_DDR		BIT(MEM_DDR)
+#define MEM_FLAG_RDDR		BIT(MEM_RDDR)
+#define MEM_FLAG_RMBS		BIT(MEM_RMBS)
+#define MEM_FLAG_DDR2           BIT(MEM_DDR2)
+#define MEM_FLAG_FB_DDR2        BIT(MEM_FB_DDR2)
+#define MEM_FLAG_RDDR2          BIT(MEM_RDDR2)
+#define MEM_FLAG_XDR            BIT(MEM_XDR)
+#define MEM_FLAG_DDR3		 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3		 BIT(MEM_RDDR3)
+
+/* chipset Error Detection and Correction capabilities and mode */
+enum edac_type {
+	EDAC_UNKNOWN = 0,	/* Unknown if ECC is available */
+	EDAC_NONE,		/* Doesn't support ECC */
+	EDAC_RESERVED,		/* Reserved ECC type */
+	EDAC_PARITY,		/* Detects parity errors */
+	EDAC_EC,		/* Error Checking - no correction */
+	EDAC_SECDED,		/* Single-bit error correction, double-bit detection */
+	EDAC_S2ECD2ED,		/* Chipkill x2 devices - do these exist? */
+	EDAC_S4ECD4ED,		/* Chipkill x4 devices */
+	EDAC_S8ECD8ED,		/* Chipkill x8 devices */
+	EDAC_S16ECD16ED,	/* Chipkill x16 devices */
+};
+
+#define EDAC_FLAG_UNKNOWN	BIT(EDAC_UNKNOWN)
+#define EDAC_FLAG_NONE		BIT(EDAC_NONE)
+#define EDAC_FLAG_PARITY	BIT(EDAC_PARITY)
+#define EDAC_FLAG_EC		BIT(EDAC_EC)
+#define EDAC_FLAG_SECDED	BIT(EDAC_SECDED)
+#define EDAC_FLAG_S2ECD2ED	BIT(EDAC_S2ECD2ED)
+#define EDAC_FLAG_S4ECD4ED	BIT(EDAC_S4ECD4ED)
+#define EDAC_FLAG_S8ECD8ED	BIT(EDAC_S8ECD8ED)
+#define EDAC_FLAG_S16ECD16ED	BIT(EDAC_S16ECD16ED)
+
+/* scrubbing capabilities */
+enum scrub_type {
+	SCRUB_UNKNOWN = 0,	/* Unknown if scrubber is available */
+	SCRUB_NONE,		/* No scrubber */
+	SCRUB_SW_PROG,		/* SW progressive (sequential) scrubbing */
+	SCRUB_SW_SRC,		/* Software scrub only errors */
+	SCRUB_SW_PROG_SRC,	/* Progressive software scrub from an error */
+	SCRUB_SW_TUNABLE,	/* Software scrub frequency is tunable */
+	SCRUB_HW_PROG,		/* HW progressive (sequential) scrubbing */
+	SCRUB_HW_SRC,		/* Hardware scrub only errors */
+	SCRUB_HW_PROG_SRC,	/* Progressive hardware scrub from an error */
+	SCRUB_HW_TUNABLE	/* Hardware scrub frequency is tunable */
+};
+
+#define SCRUB_FLAG_SW_PROG	BIT(SCRUB_SW_PROG)
+#define SCRUB_FLAG_SW_SRC	BIT(SCRUB_SW_SRC)
+#define SCRUB_FLAG_SW_PROG_SRC	BIT(SCRUB_SW_PROG_SRC)
+#define SCRUB_FLAG_SW_TUN	BIT(SCRUB_SW_TUNABLE)
+#define SCRUB_FLAG_HW_PROG	BIT(SCRUB_HW_PROG)
+#define SCRUB_FLAG_HW_SRC	BIT(SCRUB_HW_SRC)
+#define SCRUB_FLAG_HW_PROG_SRC	BIT(SCRUB_HW_PROG_SRC)
+#define SCRUB_FLAG_HW_TUN	BIT(SCRUB_HW_TUNABLE)
+
+/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
+
+/* EDAC internal operation states */
+#define	OP_ALLOC		0x100
+#define OP_RUNNING_POLL		0x201
+#define OP_RUNNING_INTERRUPT	0x202
+#define OP_RUNNING_POLL_INTR	0x203
+#define OP_OFFLINE		0x300
+
+/*
+ * There are several things to be aware of that aren't at all obvious:
+ *
+ *
+ * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
+ *
+ * These are some of the many terms that are thrown about that don't always
+ * mean what people think they mean (Inconceivable!).  In the interest of
+ * creating a common ground for discussion, terms and their definitions
+ * will be established.
+ *
+ * Memory devices:	The individual chip on a memory stick.  These devices
+ *			commonly output 4 and 8 bits each.  Grouping several
+ *			of these in parallel provides 64 bits which is common
+ *			for a memory stick.
+ *
+ * Memory Stick:	A printed circuit board that aggregates multiple
+ *			memory devices in parallel.  This is the atomic
+ *			memory component that is purchasable by Joe consumer
+ *			and loaded into a memory socket.
+ *
+ * Socket:		A physical connector on the motherboard that accepts
+ *			a single memory stick.
+ *
+ * Channel:		Set of memory devices on a memory stick that must be
+ *			grouped in parallel with one or more additional
+ *			channels from other memory sticks.  This parallel
+ *			grouping of the output from multiple channels is
+ *			necessary for the smallest granularity of memory access.
+ *			Some memory controllers are capable of single channel -
+ *			which means that memory sticks can be loaded
+ *			individually.  Other memory controllers are only
+ *			capable of dual channel - which means that memory
+ *			sticks must be loaded as pairs (see "socket set").
+ *
+ * Chip-select row:	All of the memory devices that are selected together
+ *			for a single, minimum grain of memory access.
+ *			This selects all of the parallel memory devices across
+ *			all of the parallel channels.  Common chip-select rows
+ *			for single channel are 64 bits, for dual channel 128
+ *			bits.
+ *
+ * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memory.
+ *			Motherboards commonly drive two chip-select pins to
+ *			a memory stick. A single-ranked stick will occupy
+ *			only one of those rows. The other will be unused.
+ *
+ * Double-Ranked stick:	A double-ranked stick has two chip-select rows which
+ *			access different sets of memory devices.  The two
+ *			rows cannot be accessed concurrently.
+ *
+ * Double-sided stick:	DEPRECATED TERM, see Double-Ranked stick.
+ *			A double-sided stick has two chip-select rows which
+ *			access different sets of memory devices.  The two
+ *			rows cannot be accessed concurrently.  "Double-sided"
+ *			is irrespective of the memory devices being mounted
+ *			on both sides of the memory stick.
+ *
+ * Socket set:		All of the memory sticks that are required for
+ *			a single memory access or all of the memory sticks
+ *			spanned by a chip-select row.  A single socket set
+ *			has two chip-select rows and if double-sided sticks
+ *			are used these will occupy those chip-select rows.
+ *
+ * Bank:		This term is avoided because it is unclear when
+ *			needing to distinguish between chip-select rows and
+ *			socket sets.
+ *
+ * Controller pages:
+ *
+ * Physical pages:
+ *
+ * Virtual pages:
+ *
+ *
+ * STRUCTURE ORGANIZATION AND CHOICES
+ *
+ *
+ *
+ * PS - I enjoyed writing all that about as much as you enjoyed reading it.
+ */
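+
+/*
+ * Worked example (illustrative): a stick built from x8 devices needs
+ * eight of them to fill one 64-bit channel; on a dual channel
+ * controller one chip-select row then spans 2 channels x 8 devices
+ * = 16 devices and is 128 bits wide.
+ */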
+
+struct channel_info {
+	int chan_idx;		/* channel index */
+	u32 ce_count;		/* Correctable Errors for this CHANNEL */
+	char label[EDAC_MC_LABEL_LEN + 1];	/* DIMM label on motherboard */
+	struct csrow_info *csrow;	/* the parent */
+};
+
+struct csrow_info {
+	unsigned long first_page;	/* first page number in dimm */
+	unsigned long last_page;	/* last page number in dimm */
+	unsigned long page_mask;	/* used for interleaving -
+					 * 0UL for non intlv
+					 */
+	u32 nr_pages;		/* number of pages in csrow */
+	u32 grain;		/* granularity of reported error in bytes */
+	int csrow_idx;		/* the chip-select row */
+	enum dev_type dtype;	/* memory device type */
+	u32 ue_count;		/* Uncorrectable Errors for this csrow */
+	u32 ce_count;		/* Correctable Errors for this csrow */
+	enum mem_type mtype;	/* memory csrow type */
+	enum edac_type edac_mode;	/* EDAC mode for this csrow */
+	struct mem_ctl_info *mci;	/* the parent */
+
+	struct kobject kobj;	/* sysfs kobject for this csrow */
+
+	/* channel information for this csrow */
+	u32 nr_channels;
+	struct channel_info *channels;
+};
+
+/* mcidev_sysfs_attribute structure
+ *	used for driver sysfs attributes and in mem_ctl_info
+ * 	sysfs top level entries
+ */
+struct mcidev_sysfs_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct mem_ctl_info *, char *);
+	ssize_t (*store)(struct mem_ctl_info *, const char *, size_t);
+};
+
+/* MEMORY controller information structure
+ */
+struct mem_ctl_info {
+	struct list_head link;	/* for global list of mem_ctl_info structs */
+
+	struct module *owner;	/* Module owner of this control struct */
+
+	unsigned long mtype_cap;	/* memory types supported by mc */
+	unsigned long edac_ctl_cap;	/* Mem controller EDAC capabilities */
+	unsigned long edac_cap;	/* configuration capabilities - this is
+				 * closely related to edac_ctl_cap.  The
+				 * difference is that the controller may be
+				 * capable of s4ecd4ed which would be listed
+				 * in edac_ctl_cap, but if channels aren't
+				 * capable of s4ecd4ed then the edac_cap would
+				 * not have that capability.
+				 */
+	unsigned long scrub_cap;	/* chipset scrub capabilities */
+	enum scrub_type scrub_mode;	/* current scrub mode */
+
+	/* Translates sdram memory scrub rate given in bytes/sec to the
+	   internal representation and configures whatever else needs
+	   to be configured.
+	 */
+	int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+
+	/* Gets the current sdram memory scrub rate from the internal
+	   representation and converts it to the closest matching
+	   bandwidth in bytes/sec.
+	 */
+	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+
+
+	/* pointer to edac checking routine */
+	void (*edac_check) (struct mem_ctl_info * mci);
+
+	/*
+	 * Remaps memory pages: controller pages to physical pages.
+	 * For most MC's, this will be NULL.
+	 */
+	/* FIXME - why not send the phys page to begin with? */
+	unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
+					   unsigned long page);
+	int mc_idx;
+	int nr_csrows;
+	struct csrow_info *csrows;
+	/*
+	 * FIXME - what about controllers on other busses? - IDs must be
+	 * unique.  dev pointer should be sufficiently unique, but
+	 * BUS:SLOT.FUNC numbers may not be unique.
+	 */
+	struct device *dev;
+	const char *mod_name;
+	const char *mod_ver;
+	const char *ctl_name;
+	const char *dev_name;
+	char proc_name[MC_PROC_NAME_MAX_LEN + 1];
+	void *pvt_info;
+	u32 ue_noinfo_count;	/* Uncorrectable Errors w/o info */
+	u32 ce_noinfo_count;	/* Correctable Errors w/o info */
+	u32 ue_count;		/* Total Uncorrectable Errors for this MC */
+	u32 ce_count;		/* Total Correctable Errors for this MC */
+	unsigned long start_time;	/* mci load start time (in jiffies) */
+
+	/* this stuff is for safe removal of mc devices from global list while
+	 * NMI handlers may be traversing list
+	 */
+	struct rcu_head rcu;
+	struct completion complete;
+
+	/* edac sysfs device control */
+	struct kobject edac_mci_kobj;
+
+	/* Additional top controller level attributes, but specified
+	 * by the low level driver.
+	 *
+	 * Set by the low level driver to provide attributes at the
+	 * controller level, same level as 'ue_count' and 'ce_count' above.
+	 * An array of structures, NULL terminated
+	 *
+	 * If attributes are desired, then set to array of attributes
+	 * If no attributes are desired, leave NULL
+	 */
+	struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
+
+	/* work struct for this MC */
+	struct delayed_work work;
+
+	/* the internal state of this controller instance */
+	int op_state;
+};
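+
+/*
+ * Illustrative sketch (names hypothetical) of a low level driver
+ * filling in the fields above during probe:
+ *
+ *	mci->mtype_cap = MEM_FLAG_DDR2;
+ *	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ *	mci->edac_cap = EDAC_FLAG_SECDED;
+ *	mci->mod_name = "mydrv_edac";
+ *	mci->ctl_name = "mydrv";
+ *	mci->edac_check = mydrv_check;	/* NULL would mean interrupt driven */
+ */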
+
+/*
+ * The following are the structures to provide for a generic
+ * or abstract 'edac_device'. This set of structures and the
+ * code that implements the APIs for the same, provide for
+ * registering EDAC type devices which are NOT standard memory.
+ *
+ * CPU caches (L1 and L2)
+ * DMA engines
+ * Core CPU switches
+ * Fabric switch units
+ * PCIe interface controllers
+ * other EDAC/ECC type devices that can be monitored for
+ * errors, etc.
+ *
+ * It allows for a two-level hierarchy. For example:
+ *
+ * cache could be composed of L1, L2 and L3 levels of cache.
+ * Each CPU core would have its own L1 cache, while sharing
+ * L2 and maybe L3 caches.
+ *
+ * View them arranged, via the sysfs presentation:
+ * /sys/devices/system/edac/..
+ *
+ *	mc/		<existing memory device directory>
+ *	cpu/cpu0/..	<L1 and L2 block directory>
+ *		/L1-cache/ce_count
+ *			 /ue_count
+ *		/L2-cache/ce_count
+ *			 /ue_count
+ *	cpu/cpu1/..	<L1 and L2 block directory>
+ *		/L1-cache/ce_count
+ *			 /ue_count
+ *		/L2-cache/ce_count
+ *			 /ue_count
+ *	...
+ *
+ *	the L1 and L2 directories would be "edac_device_block's"
+ */
+
+struct edac_device_counter {
+	u32 ue_count;
+	u32 ce_count;
+};
+
+/* forward reference */
+struct edac_device_ctl_info;
+struct edac_device_block;
+
+/* edac_dev_sysfs_attribute structure
+ *	used for driver sysfs attributes in edac_device_ctl_info
+ *	for extra controls and attributes,
+ *	like high level error injection controls
+ */
+struct edac_dev_sysfs_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct edac_device_ctl_info *, char *);
+	ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t);
+};
+
+/* edac_dev_sysfs_block_attribute structure
+ *
+ *	used in leaf 'block' nodes for adding controls/attributes
+ *
+ *	each block in each instance of the containing control structure
+ *	can have an array of the following. The show and store functions
+ *	will be filled in with the show/store function in the
+ *	low level driver.
+ *
+ *	The 'value' field will be the actual value field used for
+ *	counting
+ */
+struct edac_dev_sysfs_block_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct kobject *, struct attribute *, char *);
+	ssize_t (*store)(struct kobject *, struct attribute *,
+			const char *, size_t);
+	struct edac_device_block *block;
+
+	unsigned int value;
+};
+
+/* device block control structure */
+struct edac_device_block {
+	struct edac_device_instance *instance;	/* Up Pointer */
+	char name[EDAC_DEVICE_NAME_LEN + 1];
+
+	struct edac_device_counter counters;	/* basic UE and CE counters */
+
+	int nr_attribs;		/* how many attributes */
+
+	/* this block's attributes, could be NULL */
+	struct edac_dev_sysfs_block_attribute *block_attributes;
+
+	/* edac sysfs device control */
+	struct kobject kobj;
+};
+
+/* device instance control structure */
+struct edac_device_instance {
+	struct edac_device_ctl_info *ctl;	/* Up pointer */
+	char name[EDAC_DEVICE_NAME_LEN + 4];
+
+	struct edac_device_counter counters;	/* instance counters */
+
+	u32 nr_blocks;		/* how many blocks */
+	struct edac_device_block *blocks;	/* block array */
+
+	/* edac sysfs device control */
+	struct kobject kobj;
+};
+
+
+/*
+ * Abstract edac_device control info structure
+ *
+ */
+struct edac_device_ctl_info {
+	/* for global list of edac_device_ctl_info structs */
+	struct list_head link;
+
+	struct module *owner;	/* Module owner of this control struct */
+
+	int dev_idx;
+
+	/* Per instance controls for this edac_device */
+	int log_ue;		/* boolean for logging UEs */
+	int log_ce;		/* boolean for logging CEs */
+	int panic_on_ue;	/* boolean for panic'ing on an UE */
+	unsigned poll_msec;	/* polling interval in milliseconds */
+	unsigned long delay;	/* number of jiffies for poll_msec */
+
+	/* Additional top controller level attributes, but specified
+	 * by the low level driver.
+	 *
+	 * Set by the low level driver to provide attributes at the
+	 * controller level, same level as 'ue_count' and 'ce_count' above.
+	 * An array of structures, NULL terminated
+	 *
+	 * If attributes are desired, then set to array of attributes
+	 * If no attributes are desired, leave NULL
+	 */
+	struct edac_dev_sysfs_attribute *sysfs_attributes;
+
+	/* pointer to main 'edac' class in sysfs */
+	struct sysdev_class *edac_class;
+
+	/* the internal state of this controller instance */
+	int op_state;
+	/* work struct for this instance */
+	struct delayed_work work;
+
+	/* pointer to edac polling checking routine:
+	 *      If NOT NULL: points to polling check routine
+	 *      If NULL: Then assumes INTERRUPT operation, where
+	 *              MC driver will receive events
+	 */
+	void (*edac_check) (struct edac_device_ctl_info * edac_dev);
+
+	struct device *dev;	/* pointer to device structure */
+
+	const char *mod_name;	/* module name */
+	const char *ctl_name;	/* edac controller  name */
+	const char *dev_name;	/* pci/platform/etc... name */
+
+	void *pvt_info;		/* pointer to 'private driver' info */
+
+	unsigned long start_time;	/* edac_device load start time (jiffies) */
+
+	/* these are for safe removal of mc devices from global list while
+	 * NMI handlers may be traversing list
+	 */
+	struct rcu_head rcu;
+	struct completion removal_complete;
+
+	/* sysfs top name under 'edac' directory
+	 * and instance name:
+	 *      cpu/cpu0/...
+	 *      cpu/cpu1/...
+	 *      cpu/cpu2/...
+	 *      ...
+	 */
+	char name[EDAC_DEVICE_NAME_LEN + 1];
+
+	/* Number of instances supported on this control structure
+	 * and the array of those instances
+	 */
+	u32 nr_instances;
+	struct edac_device_instance *instances;
+
+	/* Event counters for this whole EDAC Device */
+	struct edac_device_counter counters;
+
+	/* edac sysfs device control for the 'name'
+	 * device this structure controls
+	 */
+	struct kobject kobj;
+};
+
+/* To get from the instance's wq to the beginning of the ctl structure */
+#define to_edac_mem_ctl_work(w) \
+		container_of(w, struct mem_ctl_info, work)
+
+#define to_edac_device_ctl_work(w) \
+		container_of(w, struct edac_device_ctl_info, work)
+
+/*
+ * The alloc() and free() functions for the 'edac_device' control info
+ * structure. A MC driver will allocate one of these for each edac_device
+ * it is going to control/register with the EDAC CORE.
+ */
+extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
+		unsigned sizeof_private,
+		char *edac_device_name, unsigned nr_instances,
+		char *edac_block_name, unsigned nr_blocks,
+		unsigned offset_value,
+		struct edac_dev_sysfs_block_attribute *block_attributes,
+		unsigned nr_attribs,
+		int device_index);
+
+/* The offset value can be:
+ *	-1 indicating no offset value
+ *	0 for zero-based block numbers
+ *	1 for 1-based block number
+ *	other for other-based block number
+ */
+#define	BLOCK_OFFSET_VALUE_OFF	((unsigned) -1)
+
+extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info);
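+
+/*
+ * Illustrative allocation sketch (names hypothetical): one 'cache'
+ * control with one instance per CPU and two blocks named L1 and L2
+ * (block names are 1-based via offset_value), no extra attributes:
+ *
+ *	edac_dev = edac_device_alloc_ctl_info(sizeof(struct mydrv_pvt),
+ *					"cache", num_online_cpus(),
+ *					"L", 2, 1, NULL, 0,
+ *					edac_device_alloc_index());
+ *	if (!edac_dev)
+ *		return -ENOMEM;
+ */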
+
+#ifdef CONFIG_PCI
+
+struct edac_pci_counter {
+	atomic_t pe_count;
+	atomic_t npe_count;
+};
+
+/*
+ * Abstract edac_pci control info structure
+ *
+ */
+struct edac_pci_ctl_info {
+	/* for global list of edac_pci_ctl_info structs */
+	struct list_head link;
+
+	int pci_idx;
+
+	struct sysdev_class *edac_class;	/* pointer to class */
+
+	/* the internal state of this controller instance */
+	int op_state;
+	/* work struct for this instance */
+	struct delayed_work work;
+
+	/* pointer to edac polling checking routine:
+	 *      If NOT NULL: points to polling check routine
+	 *      If NULL: Then assumes INTERRUPT operation, where
+	 *              MC driver will receive events
+	 */
+	void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
+
+	struct device *dev;	/* pointer to device structure */
+
+	const char *mod_name;	/* module name */
+	const char *ctl_name;	/* edac controller  name */
+	const char *dev_name;	/* pci/platform/etc... name */
+
+	void *pvt_info;		/* pointer to 'private driver' info */
+
+	unsigned long start_time;	/* edac_pci load start time (jiffies) */
+
+	/* these are for safe removal of devices from global list while
+	 * NMI handlers may be traversing list
+	 */
+	struct rcu_head rcu;
+	struct completion complete;
+
+	/* sysfs top name under 'edac' directory
+	 * and instance name:
+	 *      cpu/cpu0/...
+	 *      cpu/cpu1/...
+	 *      cpu/cpu2/...
+	 *      ...
+	 */
+	char name[EDAC_DEVICE_NAME_LEN + 1];
+
+	/* Event counters for this whole EDAC Device */
+	struct edac_pci_counter counters;
+
+	/* edac sysfs device control for the 'name'
+	 * device this structure controls
+	 */
+	struct kobject kobj;
+	struct completion kobj_complete;
+};
+
+#define to_edac_pci_ctl_work(w) \
+		container_of(w, struct edac_pci_ctl_info, work)
+
+/* write all or some bits in a byte-register*/
+static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
+				   u8 mask)
+{
+	if (mask != 0xff) {
+		u8 buf;
+
+		pci_read_config_byte(pdev, offset, &buf);
+		value &= mask;
+		buf &= ~mask;
+		value |= buf;
+	}
+
+	pci_write_config_byte(pdev, offset, value);
+}
+
+/* write all or some bits in a word-register*/
+static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
+				    u16 value, u16 mask)
+{
+	if (mask != 0xffff) {
+		u16 buf;
+
+		pci_read_config_word(pdev, offset, &buf);
+		value &= mask;
+		buf &= ~mask;
+		value |= buf;
+	}
+
+	pci_write_config_word(pdev, offset, value);
+}
+
+/*
+ * pci_write_bits32
+ *
+ * edac local routine to do pci_write_config_dword, but adds
+ * a mask parameter. If mask is all ones, ignore the mask.
+ * Otherwise utilize the mask to isolate specified bits
+ *
+ * write all or some bits in a dword-register
+ */
+static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
+				    u32 value, u32 mask)
+{
+	if (mask != 0xffffffff) {
+		u32 buf;
+
+		pci_read_config_dword(pdev, offset, &buf);
+		value &= mask;
+		buf &= ~mask;
+		value |= buf;
+	}
+
+	pci_write_config_dword(pdev, offset, value);
+}
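+
+/*
+ * Illustrative usage: set only bit 2 of a hypothetical dword config
+ * register at offset 0x50 while preserving the other 31 bits:
+ *
+ *	pci_write_bits32(pdev, 0x50, BIT(2), BIT(2));
+ */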
+
+#endif				/* CONFIG_PCI */
+
+extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
+					  unsigned nr_chans, int edac_index);
+extern int edac_mc_add_mc(struct mem_ctl_info *mci);
+extern void edac_mc_free(struct mem_ctl_info *mci);
+extern struct mem_ctl_info *edac_mc_find(int idx);
+extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
+extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+				      unsigned long page);
+
+/*
+ * The no info errors are used when error overflows are reported.
+ * There are a limited number of error logging registers that can
+ * be exhausted.  When all registers are exhausted and an additional
+ * error occurs then an error overflow register records that an
+ * error occurred and the type of error, but doesn't have any
+ * further information.  The ce/ue versions make for cleaner
+ * reporting logic and function interface - reduces conditional
+ * statement clutter and extra function arguments.
+ */
+extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
+			      unsigned long page_frame_number,
+			      unsigned long offset_in_page,
+			      unsigned long syndrome, int row, int channel,
+			      const char *msg);
+extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
+				      const char *msg);
+extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
+			      unsigned long page_frame_number,
+			      unsigned long offset_in_page, int row,
+			      const char *msg);
+extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
+				      const char *msg);
+extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
+				  unsigned int channel0, unsigned int channel1,
+				  char *msg);
+extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
+				  unsigned int channel, char *msg);
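+
+/*
+ * Illustrative check-routine sketch (the 'info' decoding is
+ * hypothetical): report a decoded hardware log through the common
+ * handlers so counters and sysfs stay consistent:
+ *
+ *	if (info.correctable)
+ *		edac_mc_handle_ce(mci, info.pfn, info.offset, info.syndrome,
+ *				  row, channel, "mydrv CE");
+ *	else
+ *		edac_mc_handle_ue(mci, info.pfn, info.offset, row, "mydrv UE");
+ */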
+
+/*
+ * edac_device APIs
+ */
+extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
+extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
+extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
+				int inst_nr, int block_nr, const char *msg);
+extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
+				int inst_nr, int block_nr, const char *msg);
+extern int edac_device_alloc_index(void);
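+
+/*
+ * Illustrative report (hypothetical driver): a correctable error in
+ * instance 0, block 1, e.g. the L2 block of cpu0 in the sysfs
+ * hierarchy sketched earlier:
+ *
+ *	edac_device_handle_ce(edac_dev, 0, 1, "tag parity");
+ */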
+
+/*
+ * edac_pci APIs
+ */
+extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
+				const char *edac_pci_name);
+
+extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
+
+extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
+				unsigned long value);
+
+extern int edac_pci_alloc_index(void);
+extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
+extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
+
+extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
+				struct device *dev,
+				const char *mod_name);
+
+extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
+extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
+extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
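+
+/*
+ * Illustrative lifecycle sketch: many drivers only need the generic
+ * PCI parity watcher, created at probe and released at remove:
+ *
+ *	pci_ctl = edac_pci_create_generic_ctl(&pdev->dev, "mydrv");
+ *	...
+ *	edac_pci_release_generic_ctl(pci_ctl);
+ */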
+
+/*
+ * edac misc APIs
+ */
+extern char *edac_op_state_to_string(int op_state);
+
+#endif				/* _EDAC_CORE_H_ */
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
new file mode 100644
index 0000000..d5e13c9
--- /dev/null
+++ b/drivers/edac/edac_device.c
@@ -0,0 +1,736 @@
+
+/*
+ * edac_device.c
+ * (C) 2007 www.douglaskthompson.com
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com>
+ *
+ * edac_device API implementation
+ * 19 Jan 2007
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+/* lock for the list: 'edac_device_list', manipulation of this list
+ * is protected by the 'device_ctls_mutex' lock
+ */
+static DEFINE_MUTEX(device_ctls_mutex);
+static LIST_HEAD(edac_device_list);
+
+#ifdef CONFIG_EDAC_DEBUG
+static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
+{
+	debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx);
+	debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
+	debugf3("\tdev = %p\n", edac_dev->dev);
+	debugf3("\tmod_name:ctl_name = %s:%s\n",
+		edac_dev->mod_name, edac_dev->ctl_name);
+	debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
+}
+#endif				/* CONFIG_EDAC_DEBUG */
+
+
+/*
+ * edac_device_alloc_ctl_info()
+ *	Allocate a new edac device control info structure
+ *
+ *	The control structure is allocated as a single chunk
+ *	from the OS. It is in turn sub-allocated to the
+ *	various objects that compose the structure.
+ *
+ *	The structure has a 'nr_instances' array within itself.
+ *	Each instance represents a major component
+ *		Example:  L1 cache and L2 cache are 2 instance components
+ *
+ *	Within each instance is an array of 'nr_blocks' blocks
+ */
+struct edac_device_ctl_info *edac_device_alloc_ctl_info(
+	unsigned sz_private,
+	char *edac_device_name, unsigned nr_instances,
+	char *edac_block_name, unsigned nr_blocks,
+	unsigned offset_value,		/* zero, 1, or other based offset */
+	struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
+	int device_index)
+{
+	struct edac_device_ctl_info *dev_ctl;
+	struct edac_device_instance *dev_inst, *inst;
+	struct edac_device_block *dev_blk, *blk_p, *blk;
+	struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
+	unsigned total_size;
+	unsigned count;
+	unsigned instance, block, attr;
+	void *pvt;
+	int err;
+
+	debugf4("%s() instances=%d blocks=%d\n",
+		__func__, nr_instances, nr_blocks);
+
+	/* Calculate the size of memory we need to allocate AND
+	 * determine the offsets of the various item arrays
+	 * (instance,block,attrib) from the start of an allocated structure.
+	 * We want the alignment of each item  (instance,block,attrib)
+	 * to be at least as stringent as what the compiler would
+	 * provide if we could simply hardcode everything into a single struct.
+	 */
+	dev_ctl = (struct edac_device_ctl_info *)NULL;
+
+	/* Calc the 'end' offset past end of ONE ctl_info structure
+	 * which will become the start of the 'instance' array
+	 */
+	dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+
+	/* Calc the 'end' offset past the instance array within the ctl_info
+	 * which will become the start of the block array
+	 */
+	dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+
+	/* Calc the 'end' offset past the dev_blk array
+	 * which will become the start of the attrib array, if any.
+	 */
+	count = nr_instances * nr_blocks;
+	dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
+
+	/* Check for case of when an attribute array is specified */
+	if (nr_attrib > 0) {
+		/* calc how many nr_attrib we need */
+		count *= nr_attrib;
+
+		/* Calc the 'end' offset past the attributes array */
+		pvt = edac_align_ptr(&dev_attrib[count], sz_private);
+	} else {
+		/* no attribute array specified */
+		pvt = edac_align_ptr(dev_attrib, sz_private);
+	}
+
+	/* 'pvt' now points to where the private data area is.
+	 * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
+	 * is baselined at ZERO
+	 */
+	total_size = ((unsigned long)pvt) + sz_private;
+
+	/* Allocate the amount of memory for the set of control structures */
+	dev_ctl = kzalloc(total_size, GFP_KERNEL);
+	if (dev_ctl == NULL)
+		return NULL;
+
+	/* Adjust pointers so they point within the actual memory we
+	 * just allocated rather than an imaginary chunk of memory
+	 * located at address 0.
+	 * 'dev_ctl' points to REAL memory, while the others are
+	 * ZERO based and thus need to be adjusted to point within
+	 * the allocated memory.
+	 */
+	dev_inst = (struct edac_device_instance *)
+		(((char *)dev_ctl) + ((unsigned long)dev_inst));
+	dev_blk = (struct edac_device_block *)
+		(((char *)dev_ctl) + ((unsigned long)dev_blk));
+	dev_attrib = (struct edac_dev_sysfs_block_attribute *)
+		(((char *)dev_ctl) + ((unsigned long)dev_attrib));
+	pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;
+
+	/* Begin storing the information into the control info structure */
+	dev_ctl->dev_idx = device_index;
+	dev_ctl->nr_instances = nr_instances;
+	dev_ctl->instances = dev_inst;
+	dev_ctl->pvt_info = pvt;
+
+	/* Default logging of CEs and UEs */
+	dev_ctl->log_ce = 1;
+	dev_ctl->log_ue = 1;
+
+	/* Name of this edac device */
+	snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", edac_device_name);
+
+	debugf4("%s() edac_dev=%p next after end=%p\n",
+		__func__, dev_ctl, pvt + sz_private);
+
+	/* Initialize every Instance */
+	for (instance = 0; instance < nr_instances; instance++) {
+		inst = &dev_inst[instance];
+		inst->ctl = dev_ctl;
+		inst->nr_blocks = nr_blocks;
+		blk_p = &dev_blk[instance * nr_blocks];
+		inst->blocks = blk_p;
+
+		/* name of this instance */
+		snprintf(inst->name, sizeof(inst->name),
+			 "%s%u", edac_device_name, instance);
+
+		/* Initialize every block in each instance */
+		for (block = 0; block < nr_blocks; block++) {
+			blk = &blk_p[block];
+			blk->instance = inst;
+			snprintf(blk->name, sizeof(blk->name),
+				 "%s%d", edac_block_name, block+offset_value);
+
+			debugf4("%s() instance=%d inst_p=%p block=#%d "
+				"block_p=%p name='%s'\n",
+				__func__, instance, inst, block,
+				blk, blk->name);
+
+			/* if there are NO attributes OR no attribute pointer
+			 * then continue on to next block iteration
+			 */
+			if ((nr_attrib == 0) || (attrib_spec == NULL))
+				continue;
+
+			/* setup the attribute array for this block */
+			blk->nr_attribs = nr_attrib;
+			attrib_p = &dev_attrib[(instance * nr_blocks + block)
+						* nr_attrib];
+			blk->block_attributes = attrib_p;
+
+			debugf4("%s() THIS BLOCK_ATTRIB=%p\n",
+				__func__, blk->block_attributes);
+
+			/* Initialize every user specified attribute in this
+			 * block with the data the caller passed in
+			 * Each block gets its own copy of pointers,
+			 * and its unique 'value'
+			 */
+			for (attr = 0; attr < nr_attrib; attr++) {
+				attrib = &attrib_p[attr];
+
+				/* populate the unique per attrib
+				 * with the code pointers and info
+				 */
+				attrib->attr = attrib_spec[attr].attr;
+				attrib->show = attrib_spec[attr].show;
+				attrib->store = attrib_spec[attr].store;
+
+				attrib->block = blk;	/* up link */
+
+				debugf4("%s() alloc-attrib=%p attrib_name='%s' "
+					"attrib-spec=%p spec-name=%s\n",
+					__func__, attrib, attrib->attr.name,
+					&attrib_spec[attr],
+					attrib_spec[attr].attr.name
+					);
+			}
+		}
+	}
+
+	/* Mark this instance as merely ALLOCATED */
+	dev_ctl->op_state = OP_ALLOC;
+
+	/*
+	 * Initialize the 'root' kobj for the edac_device controller
+	 */
+	err = edac_device_register_sysfs_main_kobj(dev_ctl);
+	if (err) {
+		kfree(dev_ctl);
+		return NULL;
+	}
+
+	/* at this point, the root kobj is valid, and in order to
+	 * 'free' the object, then the function:
+	 *	edac_device_unregister_sysfs_main_kobj() must be called
+	 * which will perform kobj unregistration and the actual free
+	 * will occur during the kobject callback operation
+	 */
+
+	return dev_ctl;
+}
+EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
+
+/*
+ * edac_device_free_ctl_info()
+ *	frees the memory allocated by the edac_device_alloc_ctl_info()
+ *	function
+ */
+void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
+{
+	edac_device_unregister_sysfs_main_kobj(ctl_info);
+}
+EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);
+
+/*
+ * find_edac_device_by_dev
+ *	scans the edac_device list for a specific 'struct device *'
+ *
+ *	lock to be held prior to call:	device_ctls_mutex
+ *
+ *	Return:
+ *		pointer to control structure managing 'dev'
+ *		NULL if not found on list
+ */
+static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
+{
+	struct edac_device_ctl_info *edac_dev;
+	struct list_head *item;
+
+	debugf0("%s()\n", __func__);
+
+	list_for_each(item, &edac_device_list) {
+		edac_dev = list_entry(item, struct edac_device_ctl_info, link);
+
+		if (edac_dev->dev == dev)
+			return edac_dev;
+	}
+
+	return NULL;
+}
+
+/*
+ * add_edac_dev_to_global_list
+ *	Before calling this function, caller must
+ *	assign a unique value to edac_dev->dev_idx.
+ *
+ *	lock to be held prior to call:	device_ctls_mutex
+ *
+ *	Return:
+ *		0 on success
+ *		1 on failure.
+ */
+static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
+{
+	struct list_head *item, *insert_before;
+	struct edac_device_ctl_info *rover;
+
+	insert_before = &edac_device_list;
+
+	/* Determine if already on the list */
+	rover = find_edac_device_by_dev(edac_dev->dev);
+	if (unlikely(rover != NULL))
+		goto fail0;
+
+	/* Insert in ascending order by 'dev_idx', so find position */
+	list_for_each(item, &edac_device_list) {
+		rover = list_entry(item, struct edac_device_ctl_info, link);
+
+		if (rover->dev_idx >= edac_dev->dev_idx) {
+			if (unlikely(rover->dev_idx == edac_dev->dev_idx))
+				goto fail1;
+
+			insert_before = item;
+			break;
+		}
+	}
+
+	list_add_tail_rcu(&edac_dev->link, insert_before);
+	return 0;
+
+fail0:
+	edac_printk(KERN_WARNING, EDAC_MC,
+			"%s (%s) %s %s already assigned %d\n",
+			dev_name(rover->dev), edac_dev_name(rover),
+			rover->mod_name, rover->ctl_name, rover->dev_idx);
+	return 1;
+
+fail1:
+	edac_printk(KERN_WARNING, EDAC_MC,
+			"bug in low-level driver: attempt to assign\n"
+			"    duplicate dev_idx %d in %s()\n", rover->dev_idx,
+			__func__);
+	return 1;
+}
+
+/*
+ * complete_edac_device_list_del
+ *
+ *	callback function when reference count is zero
+ */
+static void complete_edac_device_list_del(struct rcu_head *head)
+{
+	struct edac_device_ctl_info *edac_dev;
+
+	edac_dev = container_of(head, struct edac_device_ctl_info, rcu);
+	INIT_LIST_HEAD(&edac_dev->link);
+}
+
+/*
+ * del_edac_device_from_global_list
+ *
+ *	remove the RCU, setup for a callback call,
+ *	then wait for the callback to occur
+ */
+static void del_edac_device_from_global_list(struct edac_device_ctl_info
+						*edac_device)
+{
+	list_del_rcu(&edac_device->link);
+	call_rcu(&edac_device->rcu, complete_edac_device_list_del);
+	rcu_barrier();
+}
+
+/*
+ * edac_device_workq_function
+ *	performs the operation scheduled by a workq request
+ *
+ *	this workq is embedded within an edac_device_ctl_info
+ *	structure, that needs to be polled for possible error events.
+ *
+ *	This operation is to acquire the list mutex lock
+ *	(thus preventing insertion or deletion)
+ *	and then call the device's poll function IFF this device is
+ *	running polled and there is a poll function defined.
+ */
+static void edac_device_workq_function(struct work_struct *work_req)
+{
+	struct delayed_work *d_work = to_delayed_work(work_req);
+	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);
+
+	mutex_lock(&device_ctls_mutex);
+
+	/* If we are being removed, bail out immediately */
+	if (edac_dev->op_state == OP_OFFLINE) {
+		mutex_unlock(&device_ctls_mutex);
+		return;
+	}
+
+	/* Only poll controllers that are running polled and have a check */
+	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
+		(edac_dev->edac_check != NULL)) {
+			edac_dev->edac_check(edac_dev);
+	}
+
+	mutex_unlock(&device_ctls_mutex);
+
+	/* Reschedule the workq for the next time period to start again.
+	 * If the poll interval is one second, round to the next whole
+	 * second so that timers fire together on integral second
+	 * boundaries instead of scattered across the period between them.
+	 */
+	if (edac_dev->poll_msec == 1000)
+		queue_delayed_work(edac_workqueue, &edac_dev->work,
+				round_jiffies_relative(edac_dev->delay));
+	else
+		queue_delayed_work(edac_workqueue, &edac_dev->work,
+				edac_dev->delay);
+}
+
+/*
+ * edac_device_workq_setup
+ *	initialize a workq item for this edac_device instance
+ *	passing in the new delay period in msec
+ */
+void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+				unsigned msec)
+{
+	debugf0("%s()\n", __func__);
+
+	/* take the arg 'msec' and set it into the control structure
+	 * to be used in the time period calculation,
+	 * then calculate the number of jiffies that it represents
+	 */
+	edac_dev->poll_msec = msec;
+	edac_dev->delay = msecs_to_jiffies(msec);
+
+	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
+
+	/* optimize here for the 1 second case, which will be the normal
+	 * value; fire ON the 1 second time event. This reduces all sorts
+	 * of timers firing on a sub-second basis, letting them instead
+	 * fire together exactly on the 1 second boundary
+	 */
+	if (edac_dev->poll_msec == 1000)
+		queue_delayed_work(edac_workqueue, &edac_dev->work,
+				round_jiffies_relative(edac_dev->delay));
+	else
+		queue_delayed_work(edac_workqueue, &edac_dev->work,
+				edac_dev->delay);
+}
+
+/*
+ * edac_device_workq_teardown
+ *	stop the workq processing on this edac_dev
+ */
+void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+{
+	int status;
+
+	status = cancel_delayed_work(&edac_dev->work);
+	if (status == 0) {
+		/* workq instance might be running, wait for it */
+		flush_workqueue(edac_workqueue);
+	}
+}
+
+/*
+ * edac_device_reset_delay_period
+ *
+ *	need to stop any outstanding workq queued up at this time
+ *	because we will be resetting the sleep time.
+ *	Then restart the workq on the new delay
+ */
+void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+					unsigned long value)
+{
+	/* cancel the current workq request, without the mutex lock */
+	edac_device_workq_teardown(edac_dev);
+
+	/* acquire the mutex before doing the workq setup */
+	mutex_lock(&device_ctls_mutex);
+
+	/* restart the workq request, with new delay value */
+	edac_device_workq_setup(edac_dev, value);
+
+	mutex_unlock(&device_ctls_mutex);
+}
+
+/*
+ * edac_device_alloc_index: Allocate a unique device index number
+ *
+ * Return:
+ *	allocated index number
+ */
+int edac_device_alloc_index(void)
+{
+	static atomic_t device_indexes = ATOMIC_INIT(0);
+
+	return atomic_inc_return(&device_indexes) - 1;
+}
+EXPORT_SYMBOL_GPL(edac_device_alloc_index);
+
+/**
+ * edac_device_add_device: Insert the 'edac_dev' structure into the
+ * edac_device global list and create sysfs entries associated with
+ * the edac_device structure.
+ * @edac_dev: pointer to the edac_device structure to be added to the
+ * global list
+ *
+ * Return:
+ *	0	Success
+ *	!0	Failure
+ */
+int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
+{
+	debugf0("%s()\n", __func__);
+
+#ifdef CONFIG_EDAC_DEBUG
+	if (edac_debug_level >= 3)
+		edac_device_dump_device(edac_dev);
+#endif
+	mutex_lock(&device_ctls_mutex);
+
+	if (add_edac_dev_to_global_list(edac_dev))
+		goto fail0;
+
+	/* set load time so that error rate can be tracked */
+	edac_dev->start_time = jiffies;
+
+	/* create this instance's sysfs entries */
+	if (edac_device_create_sysfs(edac_dev)) {
+		edac_device_printk(edac_dev, KERN_WARNING,
+					"failed to create sysfs device\n");
+		goto fail1;
+	}
+
+	/* If there IS a check routine, then we are running POLLED */
+	if (edac_dev->edac_check != NULL) {
+		/* This instance is NOW RUNNING */
+		edac_dev->op_state = OP_RUNNING_POLL;
+
+		/*
+		 * enable workq processing on this instance,
+		 * default = 1000 msec
+		 */
+		edac_device_workq_setup(edac_dev, 1000);
+	} else {
+		edac_dev->op_state = OP_RUNNING_INTERRUPT;
+	}
+
+	/* Report action taken */
+	edac_device_printk(edac_dev, KERN_INFO,
+				"Giving out device to module '%s' controller "
+				"'%s': DEV '%s' (%s)\n",
+				edac_dev->mod_name,
+				edac_dev->ctl_name,
+				edac_dev_name(edac_dev),
+				edac_op_state_to_string(edac_dev->op_state));
+
+	mutex_unlock(&device_ctls_mutex);
+	return 0;
+
+fail1:
+	/* Some error, so remove the entry from the list */
+	del_edac_device_from_global_list(edac_dev);
+
+fail0:
+	mutex_unlock(&device_ctls_mutex);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(edac_device_add_device);
+
+/**
+ * edac_device_del_device:
+ *	Remove sysfs entries for specified edac_device structure and
+ *	then remove edac_device structure from global list
+ *
+ * @dev:
+ *	Pointer to 'struct device' representing edac_device
+ *	structure to remove.
+ *
+ * Return:
+ *	Pointer to removed edac_device structure,
+ *	OR NULL if device not found.
+ */
+struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
+{
+	struct edac_device_ctl_info *edac_dev;
+
+	debugf0("%s()\n", __func__);
+
+	mutex_lock(&device_ctls_mutex);
+
+	/* Find the structure on the list, if not there, then leave */
+	edac_dev = find_edac_device_by_dev(dev);
+	if (edac_dev == NULL) {
+		mutex_unlock(&device_ctls_mutex);
+		return NULL;
+	}
+
+	/* mark this instance as OFFLINE */
+	edac_dev->op_state = OP_OFFLINE;
+
+	/* deregister from global list */
+	del_edac_device_from_global_list(edac_dev);
+
+	mutex_unlock(&device_ctls_mutex);
+
+	/* clear workq processing on this instance */
+	edac_device_workq_teardown(edac_dev);
+
+	/* Tear down the sysfs entries for this instance */
+	edac_device_remove_sysfs(edac_dev);
+
+	edac_printk(KERN_INFO, EDAC_MC,
+		"Removed device %d for %s %s: DEV %s\n",
+		edac_dev->dev_idx,
+		edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));
+
+	return edac_dev;
+}
+EXPORT_SYMBOL_GPL(edac_device_del_device);
+
+static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
+{
+	return edac_dev->log_ce;
+}
+
+static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
+{
+	return edac_dev->log_ue;
+}
+
+static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
+					*edac_dev)
+{
+	return edac_dev->panic_on_ue;
+}
+
+/*
+ * edac_device_handle_ce
+ *	perform a common output and handling of an 'edac_dev' CE event
+ */
+void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
+			int inst_nr, int block_nr, const char *msg)
+{
+	struct edac_device_instance *instance;
+	struct edac_device_block *block = NULL;
+
+	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
+		edac_device_printk(edac_dev, KERN_ERR,
+				"INTERNAL ERROR: 'instance' out of range "
+				"(%d >= %d)\n", inst_nr,
+				edac_dev->nr_instances);
+		return;
+	}
+
+	instance = edac_dev->instances + inst_nr;
+
+	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
+		edac_device_printk(edac_dev, KERN_ERR,
+				"INTERNAL ERROR: instance %d 'block' "
+				"out of range (%d >= %d)\n",
+				inst_nr, block_nr,
+				instance->nr_blocks);
+		return;
+	}
+
+	if (instance->nr_blocks > 0) {
+		block = instance->blocks + block_nr;
+		block->counters.ce_count++;
+	}
+
+	/* Propagate the count up the 'totals' tree */
+	instance->counters.ce_count++;
+	edac_dev->counters.ce_count++;
+
+	if (edac_device_get_log_ce(edac_dev))
+		edac_device_printk(edac_dev, KERN_WARNING,
+				"CE: %s instance: %s block: %s '%s'\n",
+				edac_dev->ctl_name, instance->name,
+				block ? block->name : "N/A", msg);
+}
+EXPORT_SYMBOL_GPL(edac_device_handle_ce);
+
+/*
+ * edac_device_handle_ue
+ *	perform a common output and handling of an 'edac_dev' UE event
+ */
+void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
+			int inst_nr, int block_nr, const char *msg)
+{
+	struct edac_device_instance *instance;
+	struct edac_device_block *block = NULL;
+
+	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
+		edac_device_printk(edac_dev, KERN_ERR,
+				"INTERNAL ERROR: 'instance' out of range "
+				"(%d >= %d)\n", inst_nr,
+				edac_dev->nr_instances);
+		return;
+	}
+
+	instance = edac_dev->instances + inst_nr;
+
+	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
+		edac_device_printk(edac_dev, KERN_ERR,
+				"INTERNAL ERROR: instance %d 'block' "
+				"out of range (%d >= %d)\n",
+				inst_nr, block_nr,
+				instance->nr_blocks);
+		return;
+	}
+
+	if (instance->nr_blocks > 0) {
+		block = instance->blocks + block_nr;
+		block->counters.ue_count++;
+	}
+
+	/* Propagate the count up the 'totals' tree */
+	instance->counters.ue_count++;
+	edac_dev->counters.ue_count++;
+
+	if (edac_device_get_log_ue(edac_dev))
+		edac_device_printk(edac_dev, KERN_EMERG,
+				"UE: %s instance: %s block: %s '%s'\n",
+				edac_dev->ctl_name, instance->name,
+				block ? block->name : "N/A", msg);
+
+	if (edac_device_get_panic_on_ue(edac_dev))
+		panic("EDAC %s: UE instance: %s block %s '%s'\n",
+			edac_dev->ctl_name, instance->name,
+			block ? block->name : "N/A", msg);
+}
+EXPORT_SYMBOL_GPL(edac_device_handle_ue);
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
new file mode 100644
index 0000000..5376457
--- /dev/null
+++ b/drivers/edac/edac_device_sysfs.c
@@ -0,0 +1,881 @@
+/*
+ * file for managing the edac_device class of devices for EDAC
+ *
+ * (C) 2007 SoftwareBitMaker (http://www.softwarebitmaker.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com>
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_DEVICE_SYMLINK	"device"
+
+#define to_edacdev(k) container_of(k, struct edac_device_ctl_info, kobj)
+#define to_edacdev_attr(a) container_of(a, struct edacdev_attribute, attr)
+
+
+/*
+ * Set of edac_device_ctl_info attribute store/show functions
+ */
+
+/* 'log_ue' */
+static ssize_t edac_device_ctl_log_ue_show(struct edac_device_ctl_info
+					*ctl_info, char *data)
+{
+	return sprintf(data, "%u\n", ctl_info->log_ue);
+}
+
+static ssize_t edac_device_ctl_log_ue_store(struct edac_device_ctl_info
+					*ctl_info, const char *data,
+					size_t count)
+{
+	/* if parameter is zero, turn off flag, if non-zero turn on flag */
+	ctl_info->log_ue = (simple_strtoul(data, NULL, 0) != 0);
+
+	return count;
+}
+
+/* 'log_ce' */
+static ssize_t edac_device_ctl_log_ce_show(struct edac_device_ctl_info
+					*ctl_info, char *data)
+{
+	return sprintf(data, "%u\n", ctl_info->log_ce);
+}
+
+static ssize_t edac_device_ctl_log_ce_store(struct edac_device_ctl_info
+					*ctl_info, const char *data,
+					size_t count)
+{
+	/* if parameter is zero, turn off flag, if non-zero turn on flag */
+	ctl_info->log_ce = (simple_strtoul(data, NULL, 0) != 0);
+
+	return count;
+}
+
+/* 'panic_on_ue' */
+static ssize_t edac_device_ctl_panic_on_ue_show(struct edac_device_ctl_info
+						*ctl_info, char *data)
+{
+	return sprintf(data, "%u\n", ctl_info->panic_on_ue);
+}
+
+static ssize_t edac_device_ctl_panic_on_ue_store(struct edac_device_ctl_info
+						 *ctl_info, const char *data,
+						 size_t count)
+{
+	/* if parameter is zero, turn off flag, if non-zero turn on flag */
+	ctl_info->panic_on_ue = (simple_strtoul(data, NULL, 0) != 0);
+
+	return count;
+}
+
+/* 'poll_msec' show and store functions*/
+static ssize_t edac_device_ctl_poll_msec_show(struct edac_device_ctl_info
+					*ctl_info, char *data)
+{
+	return sprintf(data, "%u\n", ctl_info->poll_msec);
+}
+
+static ssize_t edac_device_ctl_poll_msec_store(struct edac_device_ctl_info
+					*ctl_info, const char *data,
+					size_t count)
+{
+	unsigned long value;
+
+	/* get the value and enforce that it is non-zero; it must be at
+	 * least one millisecond for the delay period between scans.
+	 * Then cancel the last outstanding delay for the work request
+	 * and set a new one.
+	 */
+	value = simple_strtoul(data, NULL, 0);
+	edac_device_reset_delay_period(ctl_info, value);
+
+	return count;
+}
+
+/* edac_device_ctl_info specific attribute structure */
+struct ctl_info_attribute {
+	struct attribute attr;
+	ssize_t(*show) (struct edac_device_ctl_info *, char *);
+	ssize_t(*store) (struct edac_device_ctl_info *, const char *, size_t);
+};
+
+#define to_ctl_info(k) container_of(k, struct edac_device_ctl_info, kobj)
+#define to_ctl_info_attr(a) container_of(a, struct ctl_info_attribute, attr)
+
+/* Function to 'show' fields from the edac_dev 'ctl_info' structure */
+static ssize_t edac_dev_ctl_info_show(struct kobject *kobj,
+				struct attribute *attr, char *buffer)
+{
+	struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
+	struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
+
+	if (ctl_info_attr->show)
+		return ctl_info_attr->show(edac_dev, buffer);
+	return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'ctl_info' structure */
+static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
+				struct attribute *attr,
+				const char *buffer, size_t count)
+{
+	struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
+	struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
+
+	if (ctl_info_attr->store)
+		return ctl_info_attr->store(edac_dev, buffer, count);
+	return -EIO;
+}
+
+/* edac_dev file operations for an 'ctl_info' */
+static struct sysfs_ops device_ctl_info_ops = {
+	.show = edac_dev_ctl_info_show,
+	.store = edac_dev_ctl_info_store
+};
+
+#define CTL_INFO_ATTR(_name,_mode,_show,_store)        \
+static struct ctl_info_attribute attr_ctl_info_##_name = {      \
+	.attr = {.name = __stringify(_name), .mode = _mode },   \
+	.show   = _show,                                        \
+	.store  = _store,                                       \
+};
+
+/* Declare the various ctl_info attributes here and their respective ops */
+CTL_INFO_ATTR(log_ue, S_IRUGO | S_IWUSR,
+	edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store);
+CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR,
+	edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store);
+CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR,
+	edac_device_ctl_panic_on_ue_show,
+	edac_device_ctl_panic_on_ue_store);
+CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR,
+	edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store);
+
+/* Base Attributes of the EDAC_DEVICE ECC object */
+static struct ctl_info_attribute *device_ctrl_attr[] = {
+	&attr_ctl_info_panic_on_ue,
+	&attr_ctl_info_log_ue,
+	&attr_ctl_info_log_ce,
+	&attr_ctl_info_poll_msec,
+	NULL,
+};
+
+/*
+ * edac_device_ctrl_master_release
+ *
+ *	called when the reference count for the 'main' kobj
+ *	for a edac_device control struct reaches zero
+ *
+ *	Reference count model:
+ *		One 'main' kobject for each control structure allocated.
+ *		That main kobj is initially set to one AND
+ *		the reference count for the EDAC 'core' module is
+ *		bumped by one, thus adding a 'keep in memory' dependency.
+ *
+ *		Each new internal kobj (in instances and blocks) then
+ *		bumps the 'main' kobject.
+ *
+ *		When they are released their release functions decrement
+ *		the 'main' kobj.
+ *
+ *		When the main kobj reaches zero (0) then THIS function
+ *		is called which then decrements the EDAC 'core' module.
+ *		When the module reference count reaches zero then the
+ *		module no longer has dependency on keeping the release
+ *		function code in memory and module can be unloaded.
+ *
+ *		This will support several control objects as well, each
+ *		with its own 'main' kobj.
+ */
+static void edac_device_ctrl_master_release(struct kobject *kobj)
+{
+	struct edac_device_ctl_info *edac_dev = to_edacdev(kobj);
+
+	debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx);
+
+	/* decrement the EDAC CORE module ref count */
+	module_put(edac_dev->owner);
+
+	/* free the control struct containing the 'main' kobj
+	 * passed in to this routine
+	 */
+	kfree(edac_dev);
+}
+
+/* ktype for the main (master) kobject */
+static struct kobj_type ktype_device_ctrl = {
+	.release = edac_device_ctrl_master_release,
+	.sysfs_ops = &device_ctl_info_ops,
+	.default_attrs = (struct attribute **)device_ctrl_attr,
+};
+
+/*
+ * edac_device_register_sysfs_main_kobj
+ *
+ *	perform the high level setup for the new edac_device instance
+ *
+ * Return:  0 SUCCESS
+ *         !0 FAILURE
+ */
+int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
+{
+	struct sysdev_class *edac_class;
+	int err;
+
+	debugf1("%s()\n", __func__);
+
+	/* get the /sys/devices/system/edac reference */
+	edac_class = edac_get_edac_class();
+	if (edac_class == NULL) {
+		debugf1("%s() no edac_class error\n", __func__);
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* Point to the 'edac_class' this instance 'reports' to */
+	edac_dev->edac_class = edac_class;
+
+	/* Init the device's kobject */
+	memset(&edac_dev->kobj, 0, sizeof(struct kobject));
+
+	/* Record which module 'owns' this control structure
+	 * and bump the ref count of the module
+	 */
+	edac_dev->owner = THIS_MODULE;
+
+	if (!try_module_get(edac_dev->owner)) {
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* register */
+	err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
+				   &edac_class->kset.kobj,
+				   "%s", edac_dev->name);
+	if (err) {
+		debugf1("%s()Failed to register '.../edac/%s'\n",
+			__func__, edac_dev->name);
+		goto err_kobj_reg;
+	}
+	kobject_uevent(&edac_dev->kobj, KOBJ_ADD);
+
+	/* At this point, to 'free' the control struct,
+	 * edac_device_unregister_sysfs_main_kobj() must be used
+	 */
+
+	debugf4("%s() Registered '.../edac/%s' kobject\n",
+		__func__, edac_dev->name);
+
+	return 0;
+
+	/* Error exit stack */
+err_kobj_reg:
+	module_put(edac_dev->owner);
+
+err_out:
+	return err;
+}
+
+/*
+ * edac_device_unregister_sysfs_main_kobj:
+ *	unregister the '..../edac/<name>' kobject
+ */
+void edac_device_unregister_sysfs_main_kobj(
+					struct edac_device_ctl_info *edac_dev)
+{
+	debugf0("%s()\n", __func__);
+	debugf4("%s() name of kobject is: %s\n",
+		__func__, kobject_name(&edac_dev->kobj));
+
+	/*
+	 * Unregister the edac device's kobject and
+	 * allow for reference count to reach 0 at which point
+	 * the callback will be called to:
+	 *   a) module_put() this module
+	 *   b) 'kfree' the memory
+	 */
+	kobject_put(&edac_dev->kobj);
+}
+
+/* edac_dev -> instance information */
+
+/*
+ * Set of low-level instance attribute show functions
+ */
+static ssize_t instance_ue_count_show(struct edac_device_instance *instance,
+				char *data)
+{
+	return sprintf(data, "%u\n", instance->counters.ue_count);
+}
+
+static ssize_t instance_ce_count_show(struct edac_device_instance *instance,
+				char *data)
+{
+	return sprintf(data, "%u\n", instance->counters.ce_count);
+}
+
+#define to_instance(k) container_of(k, struct edac_device_instance, kobj)
+#define to_instance_attr(a) container_of(a, struct instance_attribute, attr)
+
+/* DEVICE instance kobject release() function */
+static void edac_device_ctrl_instance_release(struct kobject *kobj)
+{
+	struct edac_device_instance *instance;
+
+	debugf1("%s()\n", __func__);
+
+	/* map from this kobj to the main control struct
+	 * and then dec the main kobj count
+	 */
+	instance = to_instance(kobj);
+	kobject_put(&instance->ctl->kobj);
+}
+
+/* instance specific attribute structure */
+struct instance_attribute {
+	struct attribute attr;
+	ssize_t(*show) (struct edac_device_instance *, char *);
+	ssize_t(*store) (struct edac_device_instance *, const char *, size_t);
+};
+
+/* Function to 'show' fields from the edac_dev 'instance' structure */
+static ssize_t edac_dev_instance_show(struct kobject *kobj,
+				struct attribute *attr, char *buffer)
+{
+	struct edac_device_instance *instance = to_instance(kobj);
+	struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+	if (instance_attr->show)
+		return instance_attr->show(instance, buffer);
+	return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'instance' structure */
+static ssize_t edac_dev_instance_store(struct kobject *kobj,
+				struct attribute *attr,
+				const char *buffer, size_t count)
+{
+	struct edac_device_instance *instance = to_instance(kobj);
+	struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+	if (instance_attr->store)
+		return instance_attr->store(instance, buffer, count);
+	return -EIO;
+}
+
+/* edac_dev file operations for an 'instance' */
+static struct sysfs_ops device_instance_ops = {
+	.show = edac_dev_instance_show,
+	.store = edac_dev_instance_store
+};
+
+#define INSTANCE_ATTR(_name,_mode,_show,_store)        \
+static struct instance_attribute attr_instance_##_name = {      \
+	.attr = {.name = __stringify(_name), .mode = _mode },   \
+	.show   = _show,                                        \
+	.store  = _store,                                       \
+};
+
+/*
+ * Define attributes visible for the edac_device instance object
+ *	Each contains a pointer to a show and an optional store
+ *	function pointer that does the low level output/input
+ */
+INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL);
+INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL);
+
+/* list of edac_dev 'instance' attributes */
+static struct instance_attribute *device_instance_attr[] = {
+	&attr_instance_ce_count,
+	&attr_instance_ue_count,
+	NULL,
+};
+
+/* The 'ktype' for each edac_dev 'instance' */
+static struct kobj_type ktype_instance_ctrl = {
+	.release = edac_device_ctrl_instance_release,
+	.sysfs_ops = &device_instance_ops,
+	.default_attrs = (struct attribute **)device_instance_attr,
+};
+
+/* edac_dev -> instance -> block information */
+
+#define to_block(k) container_of(k, struct edac_device_block, kobj)
+#define to_block_attr(a) \
+	container_of(a, struct edac_dev_sysfs_block_attribute, attr)
+
+/*
+ * Set of low-level block attribute show functions
+ */
+static ssize_t block_ue_count_show(struct kobject *kobj,
+					struct attribute *attr, char *data)
+{
+	struct edac_device_block *block = to_block(kobj);
+
+	return sprintf(data, "%u\n", block->counters.ue_count);
+}
+
+static ssize_t block_ce_count_show(struct kobject *kobj,
+					struct attribute *attr, char *data)
+{
+	struct edac_device_block *block = to_block(kobj);
+
+	return sprintf(data, "%u\n", block->counters.ce_count);
+}
+
+/* DEVICE block kobject release() function */
+static void edac_device_ctrl_block_release(struct kobject *kobj)
+{
+	struct edac_device_block *block;
+
+	debugf1("%s()\n", __func__);
+
+	/* get the container of the kobj */
+	block = to_block(kobj);
+
+	/* map from 'block kobj' to 'block->instance->controller->main_kobj'
+	 * now 'release' the block kobject
+	 */
+	kobject_put(&block->instance->ctl->kobj);
+}
+
+/* Function to 'show' fields from the edac_dev 'block' structure */
+static ssize_t edac_dev_block_show(struct kobject *kobj,
+				struct attribute *attr, char *buffer)
+{
+	struct edac_dev_sysfs_block_attribute *block_attr =
+						to_block_attr(attr);
+
+	if (block_attr->show)
+		return block_attr->show(kobj, attr, buffer);
+	return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'block' structure */
+static ssize_t edac_dev_block_store(struct kobject *kobj,
+				struct attribute *attr,
+				const char *buffer, size_t count)
+{
+	struct edac_dev_sysfs_block_attribute *block_attr;
+
+	block_attr = to_block_attr(attr);
+
+	if (block_attr->store)
+		return block_attr->store(kobj, attr, buffer, count);
+	return -EIO;
+}
+
+/* edac_dev file operations for a 'block' */
+static struct sysfs_ops device_block_ops = {
+	.show = edac_dev_block_show,
+	.store = edac_dev_block_store
+};
+
+#define BLOCK_ATTR(_name,_mode,_show,_store)        \
+static struct edac_dev_sysfs_block_attribute attr_block_##_name = {	\
+	.attr = {.name = __stringify(_name), .mode = _mode },   \
+	.show   = _show,                                        \
+	.store  = _store,                                       \
+};
+
+BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
+BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL);
+
+/* list of edac_dev 'block' attributes */
+static struct edac_dev_sysfs_block_attribute *device_block_attr[] = {
+	&attr_block_ce_count,
+	&attr_block_ue_count,
+	NULL,
+};
+
+/* The 'ktype' for each edac_dev 'block' */
+static struct kobj_type ktype_block_ctrl = {
+	.release = edac_device_ctrl_block_release,
+	.sysfs_ops = &device_block_ops,
+	.default_attrs = (struct attribute **)device_block_attr,
+};
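+
+/* The three ktypes above (main, instance, block) produce a fixed sysfs
+ * tree; an illustrative layout with hypothetical device/instance/block
+ * names:
+ *
+ *	/sys/devices/system/edac/<dev_name>/			(main kobject)
+ *	/sys/devices/system/edac/<dev_name>/<inst0>/ce_count	(instance attrs)
+ *	/sys/devices/system/edac/<dev_name>/<inst0>/<blk0>/ce_count (block attrs)
+ */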
+
+/* block ctor/dtor  code */
+
+/*
+ * edac_device_create_block
+ */
+static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
+				struct edac_device_instance *instance,
+				struct edac_device_block *block)
+{
+	int i;
+	int err;
+	struct edac_dev_sysfs_block_attribute *sysfs_attrib;
+	struct kobject *main_kobj;
+
+	debugf4("%s() Instance '%s' inst_p=%p  block '%s'  block_p=%p\n",
+		__func__, instance->name, instance, block->name, block);
+	debugf4("%s() block kobj=%p  block kobj->parent=%p\n",
+		__func__, &block->kobj, &block->kobj.parent);
+
+	/* init this block's kobject */
+	memset(&block->kobj, 0, sizeof(struct kobject));
+
+	/* bump the main kobject's reference count for this controller,
+	 * as this block is dependent on the main kobject
+	 */
+	main_kobj = kobject_get(&edac_dev->kobj);
+	if (!main_kobj) {
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* Add this block's kobject */
+	err = kobject_init_and_add(&block->kobj, &ktype_block_ctrl,
+				   &instance->kobj,
+				   "%s", block->name);
+	if (err) {
+		debugf1("%s() Failed to register instance '%s'\n",
+			__func__, block->name);
+		kobject_put(main_kobj);
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* If there are driver level block attributes, then add them
+	 * to the block kobject
+	 */
+	sysfs_attrib = block->block_attributes;
+	if (sysfs_attrib && block->nr_attribs) {
+		for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
+
+			debugf4("%s() creating block attrib='%s' "
+				"attrib->%p to kobj=%p\n",
+				__func__,
+				sysfs_attrib->attr.name,
+				sysfs_attrib, &block->kobj);
+
+			/* Create each block_attribute file */
+			err = sysfs_create_file(&block->kobj,
+				&sysfs_attrib->attr);
+			if (err)
+				goto err_on_attrib;
+		}
+	}
+	kobject_uevent(&block->kobj, KOBJ_ADD);
+
+	return 0;
+
+	/* Error unwind stack */
+err_on_attrib:
+	kobject_put(&block->kobj);
+
+err_out:
+	return err;
+}
+
+/*
+ * edac_device_delete_block(edac_dev,block);
+ */
+static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev,
+				struct edac_device_block *block)
+{
+	struct edac_dev_sysfs_block_attribute *sysfs_attrib;
+	int i;
+
+	/* if this block has 'attributes' then we need to iterate over the list
+	 * and 'remove' the attributes on this block
+	 */
+	sysfs_attrib = block->block_attributes;
+	if (sysfs_attrib && block->nr_attribs) {
+		for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
+
+			/* remove each block_attrib file */
+			sysfs_remove_file(&block->kobj,
+				(struct attribute *) sysfs_attrib);
+		}
+	}
+
+	/* unregister this block's kobject, SEE:
+	 *	edac_device_ctrl_block_release() callback operation
+	 */
+	kobject_put(&block->kobj);
+}
+
+/* instance ctor/dtor code */
+
+/*
+ * edac_device_create_instance
+ *	create just one instance of an edac_device 'instance'
+ */
+static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
+				int idx)
+{
+	int i, j;
+	int err;
+	struct edac_device_instance *instance;
+	struct kobject *main_kobj;
+
+	instance = &edac_dev->instances[idx];
+
+	/* Init the instance's kobject */
+	memset(&instance->kobj, 0, sizeof(struct kobject));
+
+	instance->ctl = edac_dev;
+
+	/* bump the main kobject's reference count for this controller,
+	 * as this instance is dependent on the main kobject
+	 */
+	main_kobj = kobject_get(&edac_dev->kobj);
+	if (!main_kobj) {
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* Formally register this instance's kobject under the edac_device */
+	err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl,
+				   &edac_dev->kobj, "%s", instance->name);
+	if (err != 0) {
+		debugf2("%s() Failed to register instance '%s'\n",
+			__func__, instance->name);
+		kobject_put(main_kobj);
+		goto err_out;
+	}
+
+	debugf4("%s() now register '%d' blocks for instance %d\n",
+		__func__, instance->nr_blocks, idx);
+
+	/* register all blocks of this instance */
+	for (i = 0; i < instance->nr_blocks; i++) {
+		err = edac_device_create_block(edac_dev, instance,
+						&instance->blocks[i]);
+		if (err) {
+			/* If any fail, remove all previous ones */
+			for (j = 0; j < i; j++)
+				edac_device_delete_block(edac_dev,
+							&instance->blocks[j]);
+			goto err_release_instance_kobj;
+		}
+	}
+	kobject_uevent(&instance->kobj, KOBJ_ADD);
+
+	debugf4("%s() Registered instance %d '%s' kobject\n",
+		__func__, idx, instance->name);
+
+	return 0;
+
+	/* error unwind stack */
+err_release_instance_kobj:
+	kobject_put(&instance->kobj);
+
+err_out:
+	return err;
+}
+
+/*
+ * edac_device_remove_instance
+ *	remove an edac_device instance
+ */
+static void edac_device_delete_instance(struct edac_device_ctl_info *edac_dev,
+					int idx)
+{
+	struct edac_device_instance *instance;
+	int i;
+
+	instance = &edac_dev->instances[idx];
+
+	/* unregister all blocks in this instance */
+	for (i = 0; i < instance->nr_blocks; i++)
+		edac_device_delete_block(edac_dev, &instance->blocks[i]);
+
+	/* unregister this instance's kobject, SEE:
+	 *	edac_device_ctrl_instance_release() for callback operation
+	 */
+	kobject_put(&instance->kobj);
+}
+
+/*
+ * edac_device_create_instances
+ *	create the first level of 'instances' for this device
+ *	(i.e. 'cache' might have 'cache0', 'cache1', 'cache2', etc.)
+ */
+static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev)
+{
+	int i, j;
+	int err;
+
+	debugf0("%s()\n", __func__);
+
+	/* iterate over creation of the instances */
+	for (i = 0; i < edac_dev->nr_instances; i++) {
+		err = edac_device_create_instance(edac_dev, i);
+		if (err) {
+			/* unwind previous instances on error */
+			for (j = 0; j < i; j++)
+				edac_device_delete_instance(edac_dev, j);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * edac_device_delete_instances(edac_dev);
+ *	unregister all the kobjects of the instances
+ */
+static void edac_device_delete_instances(struct edac_device_ctl_info *edac_dev)
+{
+	int i;
+
+	/* iterate over the instances and delete each one */
+	for (i = 0; i < edac_dev->nr_instances; i++)
+		edac_device_delete_instance(edac_dev, i);
+}
+
+/* edac_dev sysfs ctor/dtor  code */
+
+/*
+ * edac_device_add_main_sysfs_attributes
+ *	add some attributes to this instance's main kobject
+ */
+static int edac_device_add_main_sysfs_attributes(
+			struct edac_device_ctl_info *edac_dev)
+{
+	struct edac_dev_sysfs_attribute *sysfs_attrib;
+	int err = 0;
+
+	sysfs_attrib = edac_dev->sysfs_attributes;
+	if (sysfs_attrib) {
+		/* iterate over the array and create an attribute for each
+		 * entry in the list
+		 */
+		while (sysfs_attrib->attr.name != NULL) {
+			err = sysfs_create_file(&edac_dev->kobj,
+				(struct attribute*) sysfs_attrib);
+			if (err)
+				goto err_out;
+
+			sysfs_attrib++;
+		}
+	}
+
+err_out:
+	return err;
+}
+
+/*
+ * edac_device_remove_main_sysfs_attributes
+ *	remove any attributes from this instance's main kobject
+ */
+static void edac_device_remove_main_sysfs_attributes(
+			struct edac_device_ctl_info *edac_dev)
+{
+	struct edac_dev_sysfs_attribute *sysfs_attrib;
+
+	/* if there are main attributes defined, remove them: point to
+	 * the start of the array and iterate over it, removing each
+	 * listed attribute from this device's main kobject
+	 */
+	sysfs_attrib = edac_dev->sysfs_attributes;
+	if (sysfs_attrib) {
+		while (sysfs_attrib->attr.name != NULL) {
+			sysfs_remove_file(&edac_dev->kobj,
+					(struct attribute *) sysfs_attrib);
+			sysfs_attrib++;
+		}
+	}
+}
+
+/*
+ * edac_device_create_sysfs() Constructor
+ *
+ * accept a created edac_device control structure
+ * and 'export' it to sysfs. The 'main' kobj should already have been
+ * created. 'instance' and 'block' kobjects are registered here,
+ * along with any 'block' attributes from the low-level driver. In
+ * addition, the main attributes (if any) are connected to the main
+ * kobject of the control structure.
+ *
+ * Return:
+ *	0	Success
+ *	!0	Failure
+ */
+int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
+{
+	int err;
+	struct kobject *edac_kobj = &edac_dev->kobj;
+
+	debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx);
+
+	/* create any main attributes the caller wants */
+	err = edac_device_add_main_sysfs_attributes(edac_dev);
+	if (err) {
+		debugf0("%s() failed to add sysfs attribs\n", __func__);
+		goto err_out;
+	}
+
+	/* create a symlink from the edac device
+	 * to the platform 'device' being used for this
+	 */
+	err = sysfs_create_link(edac_kobj,
+				&edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK);
+	if (err) {
+		debugf0("%s() sysfs_create_link() returned err= %d\n",
+			__func__, err);
+		goto err_remove_main_attribs;
+	}
+
+	/* Create the first level instance directories
+	 * In turn, the nested blocks beneath the instances will
+	 * be registered as well
+	 */
+	err = edac_device_create_instances(edac_dev);
+	if (err) {
+		debugf0("%s() edac_device_create_instances() "
+			"returned err= %d\n", __func__, err);
+		goto err_remove_link;
+	}
+
+	debugf4("%s() create-instances done, idx=%d\n",
+		__func__, edac_dev->dev_idx);
+
+	return 0;
+
+	/* Error unwind stack */
+err_remove_link:
+	/* remove the sym link */
+	sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
+
+err_remove_main_attribs:
+	edac_device_remove_main_sysfs_attributes(edac_dev);
+
+err_out:
+	return err;
+}
+
+/*
+ * edac_device_remove_sysfs() destructor
+ *
+ * given an edac_device struct, tear down the kobject resources
+ */
+void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev)
+{
+	debugf0("%s()\n", __func__);
+
+	/* remove any main attributes for this device */
+	edac_device_remove_main_sysfs_attributes(edac_dev);
+
+	/* remove the device sym link */
+	sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
+
+	/* walk the instance/block kobject tree, deconstructing it */
+	edac_device_delete_instances(edac_dev);
+}
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
new file mode 100644
index 0000000..b629c41
--- /dev/null
+++ b/drivers/edac/edac_mc.c
@@ -0,0 +1,886 @@
+/*
+ * edac_mc kernel module
+ * (C) 2005, 2006 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *	http://www.anime.net/~goemon/linux-ecc/
+ *
+ * Modified by Dave Peterson and Doug Thompson
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+#include <linux/edac.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/edac.h>
+#include "edac_core.h"
+#include "edac_module.h"
+
+/* lock to memory controller's control array */
+static DEFINE_MUTEX(mem_ctls_mutex);
+static LIST_HEAD(mc_devices);
+
+#ifdef CONFIG_EDAC_DEBUG
+
+static void edac_mc_dump_channel(struct channel_info *chan)
+{
+	debugf4("\tchannel = %p\n", chan);
+	debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
+	debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
+	debugf4("\tchannel->label = '%s'\n", chan->label);
+	debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
+}
+
+static void edac_mc_dump_csrow(struct csrow_info *csrow)
+{
+	debugf4("\tcsrow = %p\n", csrow);
+	debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
+	debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
+	debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
+	debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
+	debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
+	debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
+	debugf4("\tcsrow->channels = %p\n", csrow->channels);
+	debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
+}
+
+static void edac_mc_dump_mci(struct mem_ctl_info *mci)
+{
+	debugf3("\tmci = %p\n", mci);
+	debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
+	debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
+	debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
+	debugf4("\tmci->edac_check = %p\n", mci->edac_check);
+	debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
+		mci->nr_csrows, mci->csrows);
+	debugf3("\tdev = %p\n", mci->dev);
+	debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
+	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
+}
+
+#endif				/* CONFIG_EDAC_DEBUG */
+
+/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
+ * Adjust 'ptr' so that its alignment is at least as stringent as what the
+ * compiler would provide for X and return the aligned result.
+ *
+ * If 'size' is a constant, the compiler will optimize this whole function
+ * down to either a no-op or the addition of a constant to the value of 'ptr'.
+ */
+void *edac_align_ptr(void *ptr, unsigned size)
+{
+	unsigned align, r;
+
+	/* Here we assume that the alignment of a "long long" is the most
+	 * stringent alignment that the compiler will ever provide by default.
+	 * As far as I know, this is a reasonable assumption.
+	 */
+	if (size > sizeof(long))
+		align = sizeof(long long);
+	else if (size > sizeof(int))
+		align = sizeof(long);
+	else if (size > sizeof(short))
+		align = sizeof(int);
+	else if (size > sizeof(char))
+		align = sizeof(short);
+	else
+		return (char *)ptr;
+
+	r = size % align;
+
+	if (r == 0)
+		return (char *)ptr;
+
+	return (void *)(((unsigned long)ptr) + align - r);
+}
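+
+/* Worked example (sketch) of the offset-from-NULL idiom this helper
+ * supports, as used by edac_mc_alloc() below; 'struct hdr', 'struct item'
+ * and the count 'n' are hypothetical:
+ *
+ *	struct hdr *h = (struct hdr *)0;
+ *	struct item *it = edac_align_ptr(&h[1], sizeof(*it));
+ *	unsigned total = (unsigned long)&it[n];	   bytes to kzalloc()
+ *
+ * after kzalloc(), each offset is rebased by adding the allocation base.
+ */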
+
+/**
+ * edac_mc_alloc: Allocate a struct mem_ctl_info structure
+ * @sz_pvt:	size of private storage needed
+ * @nr_csrows:	Number of CSROWS needed for this MC
+ * @nr_chans:	Number of channels for the MC
+ * @edac_index:	unique index used to identify this MC instance
+ *
+ * Everything is kmalloc'ed as one big chunk - more efficient.
+ * Only can be used if all structures have the same lifetime - otherwise
+ * you have to allocate and initialize your own structures.
+ *
+ * Use edac_mc_free() to free mc structures allocated by this function.
+ *
+ * Returns:
+ *	NULL allocation failed
+ *	struct mem_ctl_info pointer
+ */
+struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
+				unsigned nr_chans, int edac_index)
+{
+	struct mem_ctl_info *mci;
+	struct csrow_info *csi, *csrow;
+	struct channel_info *chi, *chp, *chan;
+	void *pvt;
+	unsigned size;
+	int row, chn;
+	int err;
+
+	/* Figure out the offsets of the various items from the start of an mc
+	 * structure.  We want the alignment of each item to be at least as
+	 * stringent as what the compiler would provide if we could simply
+	 * hardcode everything into a single struct.
+	 */
+	mci = (struct mem_ctl_info *)0;
+	csi = edac_align_ptr(&mci[1], sizeof(*csi));
+	chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
+	pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+	size = ((unsigned long)pvt) + sz_pvt;
+
+	mci = kzalloc(size, GFP_KERNEL);
+	if (mci == NULL)
+		return NULL;
+
+	/* Adjust pointers so they point within the memory we just allocated
+	 * rather than an imaginary chunk of memory located at address 0.
+	 */
+	csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
+	chi = (struct channel_info *)(((char *)mci) + ((unsigned long)chi));
+	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
+
+	/* setup index and various internal pointers */
+	mci->mc_idx = edac_index;
+	mci->csrows = csi;
+	mci->pvt_info = pvt;
+	mci->nr_csrows = nr_csrows;
+
+	for (row = 0; row < nr_csrows; row++) {
+		csrow = &csi[row];
+		csrow->csrow_idx = row;
+		csrow->mci = mci;
+		csrow->nr_channels = nr_chans;
+		chp = &chi[row * nr_chans];
+		csrow->channels = chp;
+
+		for (chn = 0; chn < nr_chans; chn++) {
+			chan = &chp[chn];
+			chan->chan_idx = chn;
+			chan->csrow = csrow;
+		}
+	}
+
+	mci->op_state = OP_ALLOC;
+
+	/*
+	 * Initialize the 'root' kobj for the edac_mc controller
+	 */
+	err = edac_mc_register_sysfs_main_kobj(mci);
+	if (err) {
+		kfree(mci);
+		return NULL;
+	}
+
+	/* at this point, the root kobj is valid, and in order to
+	 * 'free' the object, then the function:
+	 *      edac_mc_unregister_sysfs_main_kobj() must be called
+	 * which will perform kobj unregistration and the actual free
+	 * will occur during the kobject callback operation
+	 */
+	return mci;
+}
+EXPORT_SYMBOL_GPL(edac_mc_alloc);
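+
+/* Driver-side allocation sketch (illustrative; 'struct my_pvt', the csrow
+ * and channel counts, and 'pdev' are hypothetical):
+ *
+ *	struct mem_ctl_info *mci;
+ *
+ *	mci = edac_mc_alloc(sizeof(struct my_pvt), 8, 2, 0);
+ *	if (mci == NULL)
+ *		return -ENOMEM;
+ *	mci->dev = &pdev->dev;	   matched later by find_mci_by_dev()
+ */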
+
+/**
+ * edac_mc_free
+ *	'Free' a previously allocated 'mci' structure
+ * @mci: pointer to a struct mem_ctl_info structure
+ */
+void edac_mc_free(struct mem_ctl_info *mci)
+{
+	edac_mc_unregister_sysfs_main_kobj(mci);
+}
+EXPORT_SYMBOL_GPL(edac_mc_free);
+
+/*
+ * find_mci_by_dev
+ *
+ *	scan list of controllers looking for the one that manages
+ *	the 'dev' device
+ */
+static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
+{
+	struct mem_ctl_info *mci;
+	struct list_head *item;
+
+	debugf3("%s()\n", __func__);
+
+	list_for_each(item, &mc_devices) {
+		mci = list_entry(item, struct mem_ctl_info, link);
+
+		if (mci->dev == dev)
+			return mci;
+	}
+
+	return NULL;
+}
+
+/*
+ * handler for EDAC to check if NMI type handler has asserted interrupt
+ */
+static int edac_mc_assert_error_check_and_clear(void)
+{
+	int old_state;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		return 1;
+
+	old_state = edac_err_assert;
+	edac_err_assert = 0;
+
+	return old_state;
+}
+
+/*
+ * edac_mc_workq_function
+ *	performs the operation scheduled by a workq request
+ */
+static void edac_mc_workq_function(struct work_struct *work_req)
+{
+	struct delayed_work *d_work = to_delayed_work(work_req);
+	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
+
+	mutex_lock(&mem_ctls_mutex);
+
+	/* if this control struct has moved to the offline state, we are done */
+	if (mci->op_state == OP_OFFLINE) {
+		mutex_unlock(&mem_ctls_mutex);
+		return;
+	}
+
+	/* Only poll controllers that are running polled and have a check */
+	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+		mci->edac_check(mci);
+
+	mutex_unlock(&mem_ctls_mutex);
+
+	/* Reschedule */
+	queue_delayed_work(edac_workqueue, &mci->work,
+			msecs_to_jiffies(edac_mc_get_poll_msec()));
+}
+
+/*
+ * edac_mc_workq_setup
+ *	initialize a workq item for this mci
+ *	passing in the new delay period in msec
+ *
+ *	locking model:
+ *
+ *		called with the mem_ctls_mutex held
+ */
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+{
+	debugf0("%s()\n", __func__);
+
+	/* if this instance is not in the POLL state, then simply return */
+	if (mci->op_state != OP_RUNNING_POLL)
+		return;
+
+	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+}
+
+/*
+ * edac_mc_workq_teardown
+ *	stop the workq processing on this mci
+ *
+ *	locking model:
+ *
+ *		called WITHOUT lock held
+ */
+static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+{
+	int status;
+
+	status = cancel_delayed_work(&mci->work);
+	if (status == 0) {
+		debugf0("%s() not canceled, flush the queue\n",
+			__func__);
+
+		/* workq instance might be running, wait for it */
+		flush_workqueue(edac_workqueue);
+	}
+}
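+
+/* The cancel-or-flush pattern above, in isolation (sketch):
+ *
+ *	if (!cancel_delayed_work(&mci->work))	   0: handler may be running
+ *		flush_workqueue(edac_workqueue);   wait until it finishes
+ *
+ * cancel_delayed_work() only removes a still-pending timer; once the work
+ * function has started, flushing the queue is the only way to wait for it.
+ */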
+
+/*
+ * edac_mc_reset_delay_period(int value)
+ *
+ *	user space has updated our poll period value, so we need to
+ *	reset our workq delays
+ */
+void edac_mc_reset_delay_period(int value)
+{
+	struct mem_ctl_info *mci;
+	struct list_head *item;
+
+	mutex_lock(&mem_ctls_mutex);
+
+	/* scan the list and turn off all workq timers, doing so under lock
+	 */
+	list_for_each(item, &mc_devices) {
+		mci = list_entry(item, struct mem_ctl_info, link);
+
+		if (mci->op_state == OP_RUNNING_POLL)
+			cancel_delayed_work(&mci->work);
+	}
+
+	mutex_unlock(&mem_ctls_mutex);
+
+	/* re-walk the list, and reset the poll delay */
+	mutex_lock(&mem_ctls_mutex);
+
+	list_for_each(item, &mc_devices) {
+		mci = list_entry(item, struct mem_ctl_info, link);
+
+		edac_mc_workq_setup(mci, (unsigned long) value);
+	}
+
+	mutex_unlock(&mem_ctls_mutex);
+}
+
+/* Return 0 on success, 1 on failure.
+ * Before calling this function, caller must
+ * assign a unique value to mci->mc_idx.
+ *
+ *	locking model:
+ *
+ *		called with the mem_ctls_mutex lock held
+ */
+static int add_mc_to_global_list(struct mem_ctl_info *mci)
+{
+	struct list_head *item, *insert_before;
+	struct mem_ctl_info *p;
+
+	insert_before = &mc_devices;
+
+	p = find_mci_by_dev(mci->dev);
+	if (unlikely(p != NULL))
+		goto fail0;
+
+	list_for_each(item, &mc_devices) {
+		p = list_entry(item, struct mem_ctl_info, link);
+
+		if (p->mc_idx >= mci->mc_idx) {
+			if (unlikely(p->mc_idx == mci->mc_idx))
+				goto fail1;
+
+			insert_before = item;
+			break;
+		}
+	}
+
+	list_add_tail_rcu(&mci->link, insert_before);
+	atomic_inc(&edac_handlers);
+	return 0;
+
+fail0:
+	edac_printk(KERN_WARNING, EDAC_MC,
+		"%s (%s) %s %s already assigned %d\n", dev_name(p->dev),
+		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
+	return 1;
+
+fail1:
+	edac_printk(KERN_WARNING, EDAC_MC,
+		"bug in low-level driver: attempt to assign\n"
+		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
+	return 1;
+}
+
+static void complete_mc_list_del(struct rcu_head *head)
+{
+	struct mem_ctl_info *mci;
+
+	mci = container_of(head, struct mem_ctl_info, rcu);
+	INIT_LIST_HEAD(&mci->link);
+}
+
+static void del_mc_from_global_list(struct mem_ctl_info *mci)
+{
+	atomic_dec(&edac_handlers);
+	list_del_rcu(&mci->link);
+	call_rcu(&mci->rcu, complete_mc_list_del);
+	rcu_barrier();
+}
+
+/**
+ * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
+ *
+ * If found, return a pointer to the structure.
+ * Else return NULL.
+ *
+ * Caller must hold mem_ctls_mutex.
+ */
+struct mem_ctl_info *edac_mc_find(int idx)
+{
+	struct list_head *item;
+	struct mem_ctl_info *mci;
+
+	list_for_each(item, &mc_devices) {
+		mci = list_entry(item, struct mem_ctl_info, link);
+
+		if (mci->mc_idx >= idx) {
+			if (mci->mc_idx == idx)
+				return mci;
+
+			break;
+		}
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(edac_mc_find);
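+
+/* Note: the early 'break' above is valid only because
+ * add_mc_to_global_list() keeps mc_devices sorted by mc_idx; e.g. with
+ * indices 0, 1, 3 on the list, a lookup of idx 2 can stop at entry 3.
+ */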
+
+/**
+ * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
+ *                 create sysfs entries associated with mci structure
+ * @mci: pointer to the mci structure to be added to the list
+ * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
+ *
+ * Return:
+ *	0	Success
+ *	!0	Failure
+ */
+
+/* FIXME - should a warning be printed if no error detection? correction? */
+int edac_mc_add_mc(struct mem_ctl_info *mci)
+{
+	debugf0("%s()\n", __func__);
+
+#ifdef CONFIG_EDAC_DEBUG
+	if (edac_debug_level >= 3)
+		edac_mc_dump_mci(mci);
+
+	if (edac_debug_level >= 4) {
+		int i;
+
+		for (i = 0; i < mci->nr_csrows; i++) {
+			int j;
+
+			edac_mc_dump_csrow(&mci->csrows[i]);
+			for (j = 0; j < mci->csrows[i].nr_channels; j++)
+				edac_mc_dump_channel(&mci->csrows[i].
+						channels[j]);
+		}
+	}
+#endif
+	mutex_lock(&mem_ctls_mutex);
+
+	if (add_mc_to_global_list(mci))
+		goto fail0;
+
+	/* set load time so that error rate can be tracked */
+	mci->start_time = jiffies;
+
+	if (edac_create_sysfs_mci_device(mci)) {
+		edac_mc_printk(mci, KERN_WARNING,
+			"failed to create sysfs device\n");
+		goto fail1;
+	}
+
+	/* If there IS a check routine, then we are running POLLED */
+	if (mci->edac_check != NULL) {
+		/* This instance is NOW RUNNING */
+		mci->op_state = OP_RUNNING_POLL;
+
+		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+	} else {
+		mci->op_state = OP_RUNNING_INTERRUPT;
+	}
+
+	/* Report action taken */
+	edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
+		" DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
+
+	mutex_unlock(&mem_ctls_mutex);
+	return 0;
+
+fail1:
+	del_mc_from_global_list(mci);
+
+fail0:
+	mutex_unlock(&mem_ctls_mutex);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(edac_mc_add_mc);
+
+/**
+ * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
+ *                 remove mci structure from global list
+ * @dev: Pointer to 'struct device' representing mci structure to remove.
+ *
+ * Return pointer to removed mci structure, or NULL if device not found.
+ */
+struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0("%s()\n", __func__);
+
+	mutex_lock(&mem_ctls_mutex);
+
+	/* find the requested mci struct in the global list */
+	mci = find_mci_by_dev(dev);
+	if (mci == NULL) {
+		mutex_unlock(&mem_ctls_mutex);
+		return NULL;
+	}
+
+	/* marking MCI offline */
+	mci->op_state = OP_OFFLINE;
+
+	del_mc_from_global_list(mci);
+	mutex_unlock(&mem_ctls_mutex);
+
+	/* flush workq processes and remove sysfs */
+	edac_mc_workq_teardown(mci);
+	edac_remove_sysfs_mci_device(mci);
+
+	edac_printk(KERN_INFO, EDAC_MC,
+		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
+		mci->mod_name, mci->ctl_name, edac_dev_name(mci));
+
+	return mci;
+}
+EXPORT_SYMBOL_GPL(edac_mc_del_mc);
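+
+/* Tear-down sketch (illustrative driver ->remove(); 'pdev' is
+ * hypothetical):
+ *
+ *	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+ *	if (mci)
+ *		edac_mc_free(mci);
+ */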
+
+static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
+				u32 size)
+{
+	struct page *pg;
+	void *virt_addr;
+	unsigned long flags = 0;
+
+	debugf3("%s()\n", __func__);
+
+	/* ECC error page was not in our memory. Ignore it. */
+	if (!pfn_valid(page))
+		return;
+
+	/* Find the actual page structure then map it and fix */
+	pg = pfn_to_page(page);
+
+	if (PageHighMem(pg))
+		local_irq_save(flags);
+
+	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
+
+	/* Perform architecture specific atomic scrub operation */
+	atomic_scrub(virt_addr + offset, size);
+
+	/* Unmap and complete */
+	kunmap_atomic(virt_addr, KM_BOUNCE_READ);
+
+	if (PageHighMem(pg))
+		local_irq_restore(flags);
+}
+
+/* FIXME - should return -1 */
+int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
+{
+	struct csrow_info *csrows = mci->csrows;
+	int row, i;
+
+	debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
+	row = -1;
+
+	for (i = 0; i < mci->nr_csrows; i++) {
+		struct csrow_info *csrow = &csrows[i];
+
+		if (csrow->nr_pages == 0)
+			continue;
+
+		debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
+			"mask(0x%lx)\n", mci->mc_idx, __func__,
+			csrow->first_page, page, csrow->last_page,
+			csrow->page_mask);
+
+		if ((page >= csrow->first_page) &&
+		    (page <= csrow->last_page) &&
+		    ((page & csrow->page_mask) ==
+		     (csrow->first_page & csrow->page_mask))) {
+			row = i;
+			break;
+		}
+	}
+
+	if (row == -1)
+		edac_mc_printk(mci, KERN_ERR,
+			"could not look up page error address %lx\n",
+			(unsigned long)page);
+
+	return row;
+}
+EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
+
+/* FIXME - setable log (warning/emerg) levels */
+/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
+void edac_mc_handle_ce(struct mem_ctl_info *mci,
+		unsigned long page_frame_number,
+		unsigned long offset_in_page, unsigned long syndrome,
+		int row, int channel, const char *msg)
+{
+	unsigned long remapped_page;
+
+	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
+
+	/* FIXME - maybe make panic on INTERNAL ERROR an option */
+	if (row >= mci->nr_csrows || row < 0) {
+		/* something is wrong */
+		edac_mc_printk(mci, KERN_ERR,
+			"INTERNAL ERROR: row out of range "
+			"(%d >= %d)\n", row, mci->nr_csrows);
+		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+		return;
+	}
+
+	if (channel >= mci->csrows[row].nr_channels || channel < 0) {
+		/* something is wrong */
+		edac_mc_printk(mci, KERN_ERR,
+			"INTERNAL ERROR: channel out of range "
+			"(%d >= %d)\n", channel,
+			mci->csrows[row].nr_channels);
+		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+		return;
+	}
+
+	if (edac_mc_get_log_ce())
+		/* FIXME - put in DIMM location */
+		edac_mc_printk(mci, KERN_WARNING,
+			"CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
+			"0x%lx, row %d, channel %d, label \"%s\": %s\n",
+			page_frame_number, offset_in_page,
+			mci->csrows[row].grain, syndrome, row, channel,
+			mci->csrows[row].channels[channel].label, msg);
+
+	mci->ce_count++;
+	mci->csrows[row].ce_count++;
+	mci->csrows[row].channels[channel].ce_count++;
+
+	if (mci->scrub_mode & SCRUB_SW_SRC) {
+		/*
+		 * Some MC's can remap memory so that it is still available
+		 * at a different address when PCI devices map into memory.
+		 * MC's that can't do this lose the memory where PCI devices
+		 * are mapped.  This mapping is MC dependent and so we call
+		 * back into the MC driver for it to map the MC page to
+		 * a physical (CPU) page which can then be mapped to a virtual
+		 * page - which can then be scrubbed.
+		 */
+		remapped_page = mci->ctl_page_to_phys ?
+			mci->ctl_page_to_phys(mci, page_frame_number) :
+			page_frame_number;
+
+		edac_mc_scrub_block(remapped_page, offset_in_page,
+				mci->csrows[row].grain);
+	}
+}
+EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
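+
+/* Reporting sketch: a driver's ->edac_check() decodes its hardware error
+ * registers and forwards the result here (all values hypothetical):
+ *
+ *	edac_mc_handle_ce(mci, pfn, offset, syndrome, row, chan,
+ *			  "read error");
+ */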
+
+void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
+{
+	if (edac_mc_get_log_ce())
+		edac_mc_printk(mci, KERN_WARNING,
+			"CE - no information available: %s\n", msg);
+
+	mci->ce_noinfo_count++;
+	mci->ce_count++;
+}
+EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
+
+void edac_mc_handle_ue(struct mem_ctl_info *mci,
+		unsigned long page_frame_number,
+		unsigned long offset_in_page, int row, const char *msg)
+{
+	int len = EDAC_MC_LABEL_LEN * 4;
+	char labels[len + 1];
+	char *pos = labels;
+	int chan;
+	int chars;
+
+	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
+
+	/* FIXME - maybe make panic on INTERNAL ERROR an option */
+	if (row >= mci->nr_csrows || row < 0) {
+		/* something is wrong */
+		edac_mc_printk(mci, KERN_ERR,
+			"INTERNAL ERROR: row out of range "
+			"(%d >= %d)\n", row, mci->nr_csrows);
+		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+		return;
+	}
+
+	chars = snprintf(pos, len + 1, "%s",
+			 mci->csrows[row].channels[0].label);
+	len -= chars;
+	pos += chars;
+
+	for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
+		chan++) {
+		chars = snprintf(pos, len + 1, ":%s",
+				 mci->csrows[row].channels[chan].label);
+		len -= chars;
+		pos += chars;
+	}
+
+	if (edac_mc_get_log_ue())
+		edac_mc_printk(mci, KERN_EMERG,
+			"UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
+			"labels \"%s\": %s\n", page_frame_number,
+			offset_in_page, mci->csrows[row].grain, row,
+			labels, msg);
+
+	if (edac_mc_get_panic_on_ue())
+		panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
+			"row %d, labels \"%s\": %s\n", mci->mc_idx,
+			page_frame_number, offset_in_page,
+			mci->csrows[row].grain, row, labels, msg);
+
+	mci->ue_count++;
+	mci->csrows[row].ue_count++;
+}
+EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
+
+void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
+{
+	if (edac_mc_get_panic_on_ue())
+		panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
+
+	if (edac_mc_get_log_ue())
+		edac_mc_printk(mci, KERN_WARNING,
+			"UE - no information available: %s\n", msg);
+	mci->ue_noinfo_count++;
+	mci->ue_count++;
+}
+EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
+
+/*************************************************************
+ * On Fully Buffered DIMM modules, this helper function is
+ * called to process UE events
+ */
+void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
+			unsigned int csrow,
+			unsigned int channela,
+			unsigned int channelb, char *msg)
+{
+	int len = EDAC_MC_LABEL_LEN * 4;
+	char labels[len + 1];
+	char *pos = labels;
+	int chars;
+
+	if (csrow >= mci->nr_csrows) {
+		/* something is wrong */
+		edac_mc_printk(mci, KERN_ERR,
+			"INTERNAL ERROR: row out of range (%d >= %d)\n",
+			csrow, mci->nr_csrows);
+		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+		return;
+	}
+
+	if (channela >= mci->csrows[csrow].nr_channels) {
+		/* something is wrong */
+		edac_mc_printk(mci, KERN_ERR,
+			"INTERNAL ERROR: channel-a out of range "
+			"(%d >= %d)\n",
+			channela, mci->csrows[csrow].nr_channels);
+		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+		return;
+	}
+
+	if (channelb >= mci->csrows[csrow].nr_channels) {
+		/* something is wrong */
+		edac_mc_printk(mci, KERN_ERR,
+			"INTERNAL ERROR: channel-b out of range "
+			"(%d >= %d)\n",
+			channelb, mci->csrows[csrow].nr_channels);
+		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+		return;
+	}
+
+	mci->ue_count++;
+	mci->csrows[csrow].ue_count++;
+
+	/* Generate the DIMM labels from the specified channels */
+	chars = snprintf(pos, len + 1, "%s",
+			 mci->csrows[csrow].channels[channela].label);
+	len -= chars;
+	pos += chars;
+	chars = snprintf(pos, len + 1, "-%s",
+			 mci->csrows[csrow].channels[channelb].label);
+
+	if (edac_mc_get_log_ue())
+		edac_mc_printk(mci, KERN_EMERG,
+			"UE row %d, channel-a= %d channel-b= %d "
+			"labels \"%s\": %s\n", csrow, channela, channelb,
+			labels, msg);
+
+	if (edac_mc_get_panic_on_ue())
+		panic("UE row %d, channel-a= %d channel-b= %d "
+			"labels \"%s\": %s\n", csrow, channela,
+			channelb, labels, msg);
+}
+EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
+
+/*************************************************************
+ * On Fully Buffered DIMM modules, this helper function is
+ * called to process CE events
+ */
+void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
+			unsigned int csrow, unsigned int channel, char *msg)
+{
+
+	/* Ensure boundary values */
+	if (csrow >= mci->nr_csrows) {
+		/* something is wrong */
+		edac_mc_printk(mci, KERN_ERR,
+			"INTERNAL ERROR: row out of range (%d >= %d)\n",
+			csrow, mci->nr_csrows);
+		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+		return;
+	}
+	if (channel >= mci->csrows[csrow].nr_channels) {
+		/* something is wrong */
+		edac_mc_printk(mci, KERN_ERR,
+			"INTERNAL ERROR: channel out of range (%d >= %d)\n",
+			channel, mci->csrows[csrow].nr_channels);
+		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+		return;
+	}
+
+	if (edac_mc_get_log_ce())
+		/* FIXME - put in DIMM location */
+		edac_mc_printk(mci, KERN_WARNING,
+			"CE row %d, channel %d, label \"%s\": %s\n",
+			csrow, channel,
+			mci->csrows[csrow].channels[channel].label, msg);
+
+	mci->ce_count++;
+	mci->csrows[csrow].ce_count++;
+	mci->csrows[csrow].channels[channel].ce_count++;
+}
+EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
new file mode 100644
index 0000000..e1d4ce0
--- /dev/null
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -0,0 +1,941 @@
+/*
+ * edac_mc kernel module
+ * (C) 2005-2007 Linux Networx (http://lnxi.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/bug.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+/* MC EDAC Controls, setable by module parameter, and sysfs */
+static int edac_mc_log_ue = 1;
+static int edac_mc_log_ce = 1;
+static int edac_mc_panic_on_ue;
+static int edac_mc_poll_msec = 1000;
+
+/* Getter functions for above */
+int edac_mc_get_log_ue(void)
+{
+	return edac_mc_log_ue;
+}
+
+int edac_mc_get_log_ce(void)
+{
+	return edac_mc_log_ce;
+}
+
+int edac_mc_get_panic_on_ue(void)
+{
+	return edac_mc_panic_on_ue;
+}
+
+/* this is temporary */
+int edac_mc_get_poll_msec(void)
+{
+	return edac_mc_poll_msec;
+}
+
+static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
+{
+	long l;
+	int ret;
+
+	if (!val)
+		return -EINVAL;
+
+	ret = strict_strtol(val, 0, &l);
+	if (ret == -EINVAL || ((int)l != l))
+		return -EINVAL;
+	*((int *)kp->arg) = l;
+
+	/* notify edac_mc engine to reset the poll period */
+	edac_mc_reset_delay_period(l);
+
+	return 0;
+}
+
+/* Parameter declarations for above */
+module_param(edac_mc_panic_on_ue, int, 0644);
+MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
+module_param(edac_mc_log_ue, int, 0644);
+MODULE_PARM_DESC(edac_mc_log_ue,
+		 "Log uncorrectable error to console: 0=off 1=on");
+module_param(edac_mc_log_ce, int, 0644);
+MODULE_PARM_DESC(edac_mc_log_ce,
+		 "Log correctable error to console: 0=off 1=on");
+module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
+		  &edac_mc_poll_msec, 0644);
+MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
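+
+/* The parameters above are also runtime-tunable through sysfs; assuming
+ * this code is built into the edac_core module, e.g.:
+ *
+ *	echo 5000 > /sys/module/edac_core/parameters/edac_mc_poll_msec
+ *
+ * funnels through edac_set_poll_msec() and resets every poll timer.
+ */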
+
+/*
+ * various constants for Memory Controllers
+ */
+static const char *mem_types[] = {
+	[MEM_EMPTY] = "Empty",
+	[MEM_RESERVED] = "Reserved",
+	[MEM_UNKNOWN] = "Unknown",
+	[MEM_FPM] = "FPM",
+	[MEM_EDO] = "EDO",
+	[MEM_BEDO] = "BEDO",
+	[MEM_SDR] = "Unbuffered-SDR",
+	[MEM_RDR] = "Registered-SDR",
+	[MEM_DDR] = "Unbuffered-DDR",
+	[MEM_RDDR] = "Registered-DDR",
+	[MEM_RMBS] = "RMBS",
+	[MEM_DDR2] = "Unbuffered-DDR2",
+	[MEM_FB_DDR2] = "FullyBuffered-DDR2",
+	[MEM_RDDR2] = "Registered-DDR2",
+	[MEM_XDR] = "XDR",
+	[MEM_DDR3] = "Unbuffered-DDR3",
+	[MEM_RDDR3] = "Registered-DDR3"
+};
+
+static const char *dev_types[] = {
+	[DEV_UNKNOWN] = "Unknown",
+	[DEV_X1] = "x1",
+	[DEV_X2] = "x2",
+	[DEV_X4] = "x4",
+	[DEV_X8] = "x8",
+	[DEV_X16] = "x16",
+	[DEV_X32] = "x32",
+	[DEV_X64] = "x64"
+};
+
+static const char *edac_caps[] = {
+	[EDAC_UNKNOWN] = "Unknown",
+	[EDAC_NONE] = "None",
+	[EDAC_RESERVED] = "Reserved",
+	[EDAC_PARITY] = "PARITY",
+	[EDAC_EC] = "EC",
+	[EDAC_SECDED] = "SECDED",
+	[EDAC_S2ECD2ED] = "S2ECD2ED",
+	[EDAC_S4ECD4ED] = "S4ECD4ED",
+	[EDAC_S8ECD8ED] = "S8ECD8ED",
+	[EDAC_S16ECD16ED] = "S16ECD16ED"
+};
+
+static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
+{
+	int *value = (int *)ptr;
+
+	if (isdigit(*buffer))
+		*value = simple_strtoul(buffer, NULL, 0);
+
+	return count;
+}
+
+
+/* EDAC sysfs CSROW data structures and methods
+ */
+
+/* Set of default csrow<id> attribute show/store functions */
+static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%u\n", csrow->ue_count);
+}
+
+static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%u\n", csrow->ce_count);
+}
+
+static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
+}
+
+static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%s\n", mem_types[csrow->mtype]);
+}
+
+static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%s\n", dev_types[csrow->dtype]);
+}
+
+static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
+}
+
+/* show/store functions for DIMM Label attributes */
+static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
+				char *data, int channel)
+{
+	/* if field has not been initialized, there is nothing to send */
+	if (!csrow->channels[channel].label[0])
+		return 0;
+
+	return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
+			csrow->channels[channel].label);
+}
+
+static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
+					const char *data,
+					size_t count, int channel)
+{
+	ssize_t max_size = 0;
+
+	max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
+	strncpy(csrow->channels[channel].label, data, max_size);
+	csrow->channels[channel].label[max_size] = '\0';
+
+	return max_size;
+}
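+
+/* From user space, the show/store pair above is reached as, e.g.
+ * (mc/csrow/channel indices hypothetical):
+ *
+ *	echo DIMM_A0 > /sys/devices/system/edac/mc/mc0/csrow0/ch0_dimm_label
+ *	cat /sys/devices/system/edac/mc/mc0/csrow0/ch0_dimm_label
+ */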
+
+/* show function for dynamic chX_ce_count attribute */
+static ssize_t channel_ce_count_show(struct csrow_info *csrow,
+				char *data, int channel)
+{
+	return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
+}
+
+/* csrow specific attribute structure */
+struct csrowdev_attribute {
+	struct attribute attr;
+	 ssize_t(*show) (struct csrow_info *, char *, int);
+	 ssize_t(*store) (struct csrow_info *, const char *, size_t, int);
+	int private;
+};
+
+#define to_csrow(k) container_of(k, struct csrow_info, kobj)
+#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
+
+/* Higher-level show/store dispatch functions for the default csrow attributes */
+static ssize_t csrowdev_show(struct kobject *kobj,
+			struct attribute *attr, char *buffer)
+{
+	struct csrow_info *csrow = to_csrow(kobj);
+	struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+
+	if (csrowdev_attr->show)
+		return csrowdev_attr->show(csrow,
+					buffer, csrowdev_attr->private);
+	return -EIO;
+}
+
+static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
+			const char *buffer, size_t count)
+{
+	struct csrow_info *csrow = to_csrow(kobj);
+	struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+
+	if (csrowdev_attr->store)
+		return csrowdev_attr->store(csrow,
+					buffer,
+					count, csrowdev_attr->private);
+	return -EIO;
+}
+
+static struct sysfs_ops csrowfs_ops = {
+	.show = csrowdev_show,
+	.store = csrowdev_store
+};
+
+#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private)	\
+static struct csrowdev_attribute attr_##_name = {			\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.show   = _show,					\
+	.store  = _store,					\
+	.private = _private,					\
+};
+
+/* default csrow<id>/attribute files */
+CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0);
+CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0);
+CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0);
+CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0);
+CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0);
+CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0);
+
+/* default attributes of the CSROW<id> object */
+static struct csrowdev_attribute *default_csrow_attr[] = {
+	&attr_dev_type,
+	&attr_mem_type,
+	&attr_edac_mode,
+	&attr_size_mb,
+	&attr_ue_count,
+	&attr_ce_count,
+	NULL,
+};
+
+/* possible dynamic channel DIMM Label attribute files */
+CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 0);
+CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 1);
+CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 2);
+CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 3);
+CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 4);
+CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 5);
+
+/* Total possible dynamic DIMM Label attribute file table */
+static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
+	&attr_ch0_dimm_label,
+	&attr_ch1_dimm_label,
+	&attr_ch2_dimm_label,
+	&attr_ch3_dimm_label,
+	&attr_ch4_dimm_label,
+	&attr_ch5_dimm_label
+};
+
+/* possible dynamic channel ce_count attribute files */
+CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0);
+CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1);
+CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2);
+CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3);
+CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4);
+CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5);
+
+/* Total possible dynamic ce_count attribute file table */
+static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = {
+	&attr_ch0_ce_count,
+	&attr_ch1_ce_count,
+	&attr_ch2_ce_count,
+	&attr_ch3_ce_count,
+	&attr_ch4_ce_count,
+	&attr_ch5_ce_count
+};
+
+#define EDAC_NR_CHANNELS	6
+
+/* Create dynamic CHANNEL files, indexed by 'chan', under the specified CSROW */
+static int edac_create_channel_files(struct kobject *kobj, int chan)
+{
+	int err = -ENODEV;
+
+	if (chan >= EDAC_NR_CHANNELS)
+		return err;
+
+	/* create the DIMM label attribute file */
+	err = sysfs_create_file(kobj,
+				(struct attribute *)
+				dynamic_csrow_dimm_attr[chan]);
+
+	if (!err) {
+		/* create the CE Count attribute file */
+		err = sysfs_create_file(kobj,
+					(struct attribute *)
+					dynamic_csrow_ce_count_attr[chan]);
+	} else {
+		debugf1("%s()  dimm labels and ce_count files created",
+			__func__);
+	}
+
+	return err;
+}
+
+/* No memory to release for this kobj */
+static void edac_csrow_instance_release(struct kobject *kobj)
+{
+	struct mem_ctl_info *mci;
+	struct csrow_info *cs;
+
+	debugf1("%s()\n", __func__);
+
+	cs = container_of(kobj, struct csrow_info, kobj);
+	mci = cs->mci;
+
+	kobject_put(&mci->edac_mci_kobj);
+}
+
+/* the kobj_type instance for a CSROW */
+static struct kobj_type ktype_csrow = {
+	.release = edac_csrow_instance_release,
+	.sysfs_ops = &csrowfs_ops,
+	.default_attrs = (struct attribute **)default_csrow_attr,
+};
+
+/* Create a CSROW object under the specified edac_mc_device */
+static int edac_create_csrow_object(struct mem_ctl_info *mci,
+					struct csrow_info *csrow, int index)
+{
+	struct kobject *kobj_mci = &mci->edac_mci_kobj;
+	struct kobject *kobj;
+	int chan;
+	int err;
+
+	/* generate ..../edac/mc/mc<id>/csrow<index>   */
+	memset(&csrow->kobj, 0, sizeof(csrow->kobj));
+	csrow->mci = mci;	/* include container up link */
+
+	/* bump the mci instance's kobject's ref count */
+	kobj = kobject_get(&mci->edac_mci_kobj);
+	if (!kobj) {
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* Instantiate the csrow object */
+	err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci,
+				   "csrow%d", index);
+	if (err)
+		goto err_release_top_kobj;
+
+	/* At this point, to release a csrow kobj, one must
+	 * call kobject_put() and allow its tear-down path
+	 * to perform the release
+	 */
+
+	/* Create the dynamic attribute files on this csrow,
+	 * namely, the DIMM labels and the channel ce_count
+	 */
+	for (chan = 0; chan < csrow->nr_channels; chan++) {
+		err = edac_create_channel_files(&csrow->kobj, chan);
+		if (err) {
+			/* special case the unregister here */
+			kobject_put(&csrow->kobj);
+			goto err_out;
+		}
+	}
+	kobject_uevent(&csrow->kobj, KOBJ_ADD);
+	return 0;
+
+	/* error unwind stack */
+err_release_top_kobj:
+	kobject_put(&mci->edac_mci_kobj);
+
+err_out:
+	return err;
+}
+
+/* default sysfs methods and data structures for the main MCI kobject */
+
+static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
+					const char *data, size_t count)
+{
+	int row, chan;
+
+	mci->ue_noinfo_count = 0;
+	mci->ce_noinfo_count = 0;
+	mci->ue_count = 0;
+	mci->ce_count = 0;
+
+	for (row = 0; row < mci->nr_csrows; row++) {
+		struct csrow_info *ri = &mci->csrows[row];
+
+		ri->ue_count = 0;
+		ri->ce_count = 0;
+
+		for (chan = 0; chan < ri->nr_channels; chan++)
+			ri->channels[chan].ce_count = 0;
+	}
+
+	mci->start_time = jiffies;
+	return count;
+}
+
+/* memory scrubbing */
+static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
+					const char *data, size_t count)
+{
+	u32 bandwidth = -1;
+
+	if (mci->set_sdram_scrub_rate) {
+
+		memctrl_int_store(&bandwidth, data, count);
+
+		if (!(*mci->set_sdram_scrub_rate) (mci, &bandwidth)) {
+			edac_printk(KERN_DEBUG, EDAC_MC,
+				"Scrub rate set successfully, applied: %d\n",
+				bandwidth);
+		} else {
+			/* FIXME: error codes maybe? */
+			edac_printk(KERN_DEBUG, EDAC_MC,
+				"Scrub rate set FAILED, could not apply: %d\n",
+				bandwidth);
+		}
+	} else {
+		/* FIXME: produce "not implemented" ERROR for user-side. */
+		edac_printk(KERN_WARNING, EDAC_MC,
+			"Memory scrubbing 'set'control is not implemented!\n");
+	}
+	return count;
+}
+
+static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
+{
+	u32 bandwidth = -1;
+
+	if (mci->get_sdram_scrub_rate) {
+		if (!(*mci->get_sdram_scrub_rate) (mci, &bandwidth)) {
+			edac_printk(KERN_DEBUG, EDAC_MC,
+				"Scrub rate successfully, fetched: %d\n",
+				bandwidth);
+		} else {
+			/* FIXME: error codes maybe? */
+			edac_printk(KERN_DEBUG, EDAC_MC,
+				"Scrub rate fetch FAILED, got: %d\n",
+				bandwidth);
+		}
+	} else {
+		/* FIXME: produce "not implemented" ERROR for user-side.  */
+		edac_printk(KERN_WARNING, EDAC_MC,
+			"Memory scrubbing 'get' control is not implemented\n");
+	}
+	return sprintf(data, "%d\n", bandwidth);
+}
+
+/* default attribute files for the MCI object */
+static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%d\n", mci->ue_count);
+}
+
+static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%d\n", mci->ce_count);
+}
+
+static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%d\n", mci->ce_noinfo_count);
+}
+
+static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%d\n", mci->ue_noinfo_count);
+}
+
+static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
+}
+
+static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%s\n", mci->ctl_name);
+}
+
+static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
+{
+	int total_pages, csrow_idx;
+
+	for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
+		csrow_idx++) {
+		struct csrow_info *csrow = &mci->csrows[csrow_idx];
+
+		if (!csrow->nr_pages)
+			continue;
+
+		total_pages += csrow->nr_pages;
+	}
+
+	return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
+#define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr)
+
+/* MCI show/store functions for top most object */
+static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
+			char *buffer)
+{
+	struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+	struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+	if (mcidev_attr->show)
+		return mcidev_attr->show(mem_ctl_info, buffer);
+
+	return -EIO;
+}
+
+static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
+			const char *buffer, size_t count)
+{
+	struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+	struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+	if (mcidev_attr->store)
+		return mcidev_attr->store(mem_ctl_info, buffer, count);
+
+	return -EIO;
+}
+
+/* Intermediate show/store table */
+static struct sysfs_ops mci_ops = {
+	.show = mcidev_show,
+	.store = mcidev_store
+};
+
+#define MCIDEV_ATTR(_name,_mode,_show,_store)			\
+static struct mcidev_sysfs_attribute mci_attr_##_name = {			\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.show   = _show,					\
+	.store  = _store,					\
+};
+
+/* default Control file */
+MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
+
+/* default Attribute files */
+MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
+MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
+MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
+MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
+MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
+MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
+MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
+
+/* memory scrubber attribute file */
+MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
+	mci_sdram_scrub_rate_store);
+
+static struct mcidev_sysfs_attribute *mci_attr[] = {
+	&mci_attr_reset_counters,
+	&mci_attr_mc_name,
+	&mci_attr_size_mb,
+	&mci_attr_seconds_since_reset,
+	&mci_attr_ue_noinfo_count,
+	&mci_attr_ce_noinfo_count,
+	&mci_attr_ue_count,
+	&mci_attr_ce_count,
+	&mci_attr_sdram_scrub_rate,
+	NULL
+};
+
+/*
+ * Release of a MC controlling instance
+ *
+ *	each MC control instance has the following resources upon entry:
+ *		a) a ref count on the top memctl kobj
+ *		b) a ref count on this module
+ *
+ *	this function must decrement those ref counts and then
+ *	issue a free on the instance's memory
+ */
+static void edac_mci_control_release(struct kobject *kobj)
+{
+	struct mem_ctl_info *mci;
+
+	mci = to_mci(kobj);
+
+	debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx);
+
+	/* decrement the module ref count */
+	module_put(mci->owner);
+
+	/* free the mci instance memory here */
+	kfree(mci);
+}
+
+static struct kobj_type ktype_mci = {
+	.release = edac_mci_control_release,
+	.sysfs_ops = &mci_ops,
+	.default_attrs = (struct attribute **)mci_attr,
+};
+
+/* EDAC memory controller sysfs kset:
+ *	/sys/devices/system/edac/mc
+ */
+static struct kset *mc_kset;
+
+/*
+ * edac_mc_register_sysfs_main_kobj
+ *
+ *	sets up and registers the main kobject for each mci
+ */
+int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
+{
+	struct kobject *kobj_mci;
+	int err;
+
+	debugf1("%s()\n", __func__);
+
+	kobj_mci = &mci->edac_mci_kobj;
+
+	/* Init the mci's kobject */
+	memset(kobj_mci, 0, sizeof(*kobj_mci));
+
+	/* Record which module 'owns' this control structure
+	 * and bump the ref count of the module
+	 */
+	mci->owner = THIS_MODULE;
+
+	/* bump ref count on this module */
+	if (!try_module_get(mci->owner)) {
+		err = -ENODEV;
+		goto fail_out;
+	}
+
+	/* this instance becomes part of the mc_kset */
+	kobj_mci->kset = mc_kset;
+
+	/* register the mc<id> kobject to the mc_kset */
+	err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
+				   "mc%d", mci->mc_idx);
+	if (err) {
+		debugf1("%s()Failed to register '.../edac/mc%d'\n",
+			__func__, mci->mc_idx);
+		goto kobj_reg_fail;
+	}
+	kobject_uevent(kobj_mci, KOBJ_ADD);
+
+	/* At this point, to 'free' the control struct,
+	 * edac_mc_unregister_sysfs_main_kobj() must be used
+	 */
+
+	debugf1("%s() Registered '.../edac/mc%d' kobject\n",
+		__func__, mci->mc_idx);
+
+	return 0;
+
+	/* Error exit stack */
+
+kobj_reg_fail:
+	module_put(mci->owner);
+
+fail_out:
+	return err;
+}
+
+/*
+ * edac_mc_unregister_sysfs_main_kobj
+ *
+ *	tears down the main mci kobject and removes it from the mc_kset
+ */
+void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
+{
+	/* delete the kobj from the mc_kset */
+	kobject_put(&mci->edac_mci_kobj);
+}
+
+#define EDAC_DEVICE_SYMLINK	"device"
+
+/*
+ * edac_create_mci_instance_attributes
+ *	create MC driver specific attributes at the topmost level
+ *	directory of this mci instance.
+ */
+static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci)
+{
+	int err;
+	struct mcidev_sysfs_attribute *sysfs_attrib;
+
+	/* point to the start of the array and iterate over it
+	 * adding each attribute listed to this mci instance's kobject
+	 */
+	sysfs_attrib = mci->mc_driver_sysfs_attributes;
+
+	while (sysfs_attrib && sysfs_attrib->attr.name) {
+		err = sysfs_create_file(&mci->edac_mci_kobj,
+					(struct attribute*) sysfs_attrib);
+		if (err) {
+			return err;
+		}
+
+		sysfs_attrib++;
+	}
+
+	return 0;
+}
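+
+/*
+ * Editorial sketch (hypothetical names, not from any in-tree driver):
+ * a low-level MC driver supplies its private controls as a NULL-name
+ * terminated array hung off mci->mc_driver_sysfs_attributes, e.g.:
+ *
+ *	static struct mcidev_sysfs_attribute my_drv_attrs[] = {
+ *		{ .attr = { .name = "inject_word", .mode = 0644 },
+ *		  .show = my_inject_show, .store = my_inject_store },
+ *		{ .attr = { .name = NULL } }	(terminator)
+ *	};
+ *
+ *	mci->mc_driver_sysfs_attributes = my_drv_attrs;
+ */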
+
+/*
+ * edac_remove_mci_instance_attributes
+ *	remove MC driver specific attributes at the topmost level
+ *	directory of this mci instance.
+ */
+static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci)
+{
+	struct mcidev_sysfs_attribute *sysfs_attrib;
+
+	/* point to the start of the array and iterate over it
+	 * removing each attribute listed from this mci instance's kobject
+	 */
+	sysfs_attrib = mci->mc_driver_sysfs_attributes;
+
+	/* loop if there are attributes and until we hit a NULL entry */
+	while (sysfs_attrib && sysfs_attrib->attr.name) {
+		sysfs_remove_file(&mci->edac_mci_kobj,
+					(struct attribute *) sysfs_attrib);
+		sysfs_attrib++;
+	}
+}
+
+/*
+ * Create a new Memory Controller kobject instance,
+ *	mc<id> under the 'mc' directory
+ *
+ * Return:
+ *	0	Success
+ *	!0	Failure
+ */
+int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+	int i;
+	int err;
+	struct csrow_info *csrow;
+	struct kobject *kobj_mci = &mci->edac_mci_kobj;
+
+	debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
+
+	/* create a symlink for the device */
+	err = sysfs_create_link(kobj_mci, &mci->dev->kobj,
+				EDAC_DEVICE_SYMLINK);
+	if (err) {
+		debugf1("%s() failure to create symlink\n", __func__);
+		goto fail0;
+	}
+
+	/* If the low level driver desires some attributes,
+	 * then create them now for the driver.
+	 */
+	if (mci->mc_driver_sysfs_attributes) {
+		err = edac_create_mci_instance_attributes(mci);
+		if (err) {
+			debugf1("%s() failure to create mci attributes\n",
+				__func__);
+			goto fail0;
+		}
+	}
+
+	/* Make directories for each CSROW object under the mc<id> kobject
+	 */
+	for (i = 0; i < mci->nr_csrows; i++) {
+		csrow = &mci->csrows[i];
+
+		/* Only expose populated CSROWs */
+		if (csrow->nr_pages > 0) {
+			err = edac_create_csrow_object(mci, csrow, i);
+			if (err) {
+				debugf1("%s() failure: create csrow %d obj\n",
+					__func__, i);
+				goto fail1;
+			}
+		}
+	}
+
+	return 0;
+
+	/* CSROW error: back out what has already been registered */
+fail1:
+	for (i--; i >= 0; i--) {
+		if (mci->csrows[i].nr_pages > 0)
+			kobject_put(&mci->csrows[i].kobj);
+	}
+
+	/* remove the mci instance's attributes, if any */
+	edac_remove_mci_instance_attributes(mci);
+
+	/* remove the symlink */
+	sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
+
+fail0:
+	return err;
+}
+
+/*
+ * remove a Memory Controller instance
+ */
+void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+	int i;
+
+	debugf0("%s()\n", __func__);
+
+	/* remove all csrow kobjects */
+	for (i = 0; i < mci->nr_csrows; i++) {
+		if (mci->csrows[i].nr_pages > 0) {
+			debugf0("%s()  unreg csrow-%d\n", __func__, i);
+			kobject_put(&mci->csrows[i].kobj);
+		}
+	}
+
+	debugf0("%s()  remove_link\n", __func__);
+
+	/* remove the symlink */
+	sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
+
+	debugf0("%s()  remove_mci_instance\n", __func__);
+
+	/* remove this mci instance's attributes */
+	edac_remove_mci_instance_attributes(mci);
+
+	debugf0("%s()  unregister this mci kobj\n", __func__);
+
+	/* unregister this instance's kobject */
+	kobject_put(&mci->edac_mci_kobj);
+}
+
+/*
+ * edac_sysfs_setup_mc_kset(void)
+ *
+ * Initialize the mc_kset for the 'mc' entry
+ *	This requires creating the top 'mc' directory with a kset
+ *	and its controls/attributes.
+ *
+ *	To this 'mc' kset, the 'mci' instances will be grouped as children.
+ *
+ * Return:  0 SUCCESS
+ *         !0 FAILURE error code
+ */
+int edac_sysfs_setup_mc_kset(void)
+{
+	int err = -EINVAL;
+	struct sysdev_class *edac_class;
+
+	debugf1("%s()\n", __func__);
+
+	/* get the /sys/devices/system/edac class reference */
+	edac_class = edac_get_edac_class();
+	if (edac_class == NULL) {
+		debugf1("%s() no edac_class error=%d\n", __func__, err);
+		goto fail_out;
+	}
+
+	/* Init the MC's kobject */
+	mc_kset = kset_create_and_add("mc", NULL, &edac_class->kset.kobj);
+	if (!mc_kset) {
+		err = -ENOMEM;
+		debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
+		goto fail_out;
+	}
+
+	debugf1("%s() Registered '.../edac/mc' kobject\n", __func__);
+
+	return 0;
+
+	/* error unwind stack */
+fail_out:
+	return err;
+}
+
+/*
+ * edac_sysfs_teardown_mc_kset
+ *
+ *	deconstruct the mc_kset for memory controllers
+ */
+void edac_sysfs_teardown_mc_kset(void)
+{
+	kset_unregister(mc_kset);
+}
+
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
new file mode 100644
index 0000000..ac2aea8
--- /dev/null
+++ b/drivers/edac/edac_mce_amd.c
@@ -0,0 +1,456 @@
+#include <linux/module.h>
+#include "edac_mce_amd.h"
+
+static bool report_gart_errors;
+static void (*nb_bus_decoder)(int node_id, struct err_regs *regs);
+static void (*orig_mce_callback)(struct mce *m);
+
+void amd_report_gart_errors(bool v)
+{
+	report_gart_errors = v;
+}
+EXPORT_SYMBOL_GPL(amd_report_gart_errors);
+
+void amd_register_ecc_decoder(void (*f)(int, struct err_regs *))
+{
+	nb_bus_decoder = f;
+}
+EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
+
+void amd_unregister_ecc_decoder(void (*f)(int, struct err_regs *))
+{
+	if (nb_bus_decoder) {
+		WARN_ON(nb_bus_decoder != f);
+
+		nb_bus_decoder = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
+
+/*
+ * string representation for the different MCA reported error types, see F3x48
+ * or MSR0000_0411.
+ */
+const char *tt_msgs[] = {        /* transaction type */
+	"instruction",
+	"data",
+	"generic",
+	"reserved"
+};
+EXPORT_SYMBOL_GPL(tt_msgs);
+
+const char *ll_msgs[] = {	/* cache level */
+	"L0",
+	"L1",
+	"L2",
+	"L3/generic"
+};
+EXPORT_SYMBOL_GPL(ll_msgs);
+
+const char *rrrr_msgs[] = {
+	"generic",
+	"generic read",
+	"generic write",
+	"data read",
+	"data write",
+	"inst fetch",
+	"prefetch",
+	"evict",
+	"snoop",
+	"reserved RRRR= 9",
+	"reserved RRRR= 10",
+	"reserved RRRR= 11",
+	"reserved RRRR= 12",
+	"reserved RRRR= 13",
+	"reserved RRRR= 14",
+	"reserved RRRR= 15"
+};
+EXPORT_SYMBOL_GPL(rrrr_msgs);
+
+const char *pp_msgs[] = {	/* participating processor */
+	"local node originated (SRC)",
+	"local node responded to request (RES)",
+	"local node observed as 3rd party (OBS)",
+	"generic"
+};
+EXPORT_SYMBOL_GPL(pp_msgs);
+
+const char *to_msgs[] = {
+	"no timeout",
+	"timed out"
+};
+EXPORT_SYMBOL_GPL(to_msgs);
+
+const char *ii_msgs[] = {	/* memory or i/o */
+	"mem access",
+	"reserved",
+	"i/o access",
+	"generic"
+};
+EXPORT_SYMBOL_GPL(ii_msgs);
+
+/*
+ * Map the 4 or 5 (family-specific) bits of Extended Error code to the
+ * string table.
+ */
+const char *ext_msgs[] = {
+	"K8 ECC error",					/* 0_0000b */
+	"CRC error on link",				/* 0_0001b */
+	"Sync error packets on link",			/* 0_0010b */
+	"Master Abort during link operation",		/* 0_0011b */
+	"Target Abort during link operation",		/* 0_0100b */
+	"Invalid GART PTE entry during table walk",	/* 0_0101b */
+	"Unsupported atomic RMW command received",	/* 0_0110b */
+	"WDT error: NB transaction timeout",		/* 0_0111b */
+	"ECC/ChipKill ECC error",			/* 0_1000b */
+	"SVM DEV Error",				/* 0_1001b */
+	"Link Data error",				/* 0_1010b */
+	"Link/L3/Probe Filter Protocol error",		/* 0_1011b */
+	"NB Internal Arrays Parity error",		/* 0_1100b */
+	"DRAM Address/Control Parity error",		/* 0_1101b */
+	"Link Transmission error",			/* 0_1110b */
+	"GART/DEV Table Walk Data error"		/* 0_1111b */
+	"Res 0x100 error",				/* 1_0000b */
+	"Res 0x101 error",				/* 1_0001b */
+	"Res 0x102 error",				/* 1_0010b */
+	"Res 0x103 error",				/* 1_0011b */
+	"Res 0x104 error",				/* 1_0100b */
+	"Res 0x105 error",				/* 1_0101b */
+	"Res 0x106 error",				/* 1_0110b */
+	"Res 0x107 error",				/* 1_0111b */
+	"Res 0x108 error",				/* 1_1000b */
+	"Res 0x109 error",				/* 1_1001b */
+	"Res 0x10A error",				/* 1_1010b */
+	"Res 0x10B error",				/* 1_1011b */
+	"ECC error in L3 Cache Data",			/* 1_1100b */
+	"L3 Cache Tag error",				/* 1_1101b */
+	"L3 Cache LRU Parity error",			/* 1_1110b */
+	"Probe Filter error"				/* 1_1111b */
+};
+EXPORT_SYMBOL_GPL(ext_msgs);
+
+static void amd_decode_dc_mce(u64 mc0_status)
+{
+	u32 ec  = mc0_status & 0xffff;
+	u32 xec = (mc0_status >> 16) & 0xf;
+
+	pr_emerg(" Data Cache Error");
+
+	if (xec == 1 && TLB_ERROR(ec))
+		pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
+	else if (xec == 0) {
+		if (mc0_status & (1ULL << 40))
+			pr_cont(" during Data Scrub.\n");
+		else if (TLB_ERROR(ec))
+			pr_cont(": %s TLB parity error.\n", LL_MSG(ec));
+		else if (MEM_ERROR(ec)) {
+			u8 ll   = ec & 0x3;
+			u8 tt   = (ec >> 2) & 0x3;
+			u8 rrrr = (ec >> 4) & 0xf;
+
+			/* see F10h BKDG (31116), Table 92. */
+			if (ll == 0x1) {
+				if (tt != 0x1)
+					goto wrong_dc_mce;
+
+				pr_cont(": Data/Tag %s error.\n", RRRR_MSG(ec));
+
+			} else if (ll == 0x2 && rrrr == 0x3)
+				pr_cont(" during L1 linefill from L2.\n");
+			else
+				goto wrong_dc_mce;
+		} else if (BUS_ERROR(ec) && boot_cpu_data.x86 == 0xf)
+			pr_cont(" during system linefill.\n");
+		else
+			goto wrong_dc_mce;
+	} else
+		goto wrong_dc_mce;
+
+	return;
+
+wrong_dc_mce:
+	pr_warning("Corrupted DC MCE info?\n");
+}
+
+static void amd_decode_ic_mce(u64 mc1_status)
+{
+	u32 ec  = mc1_status & 0xffff;
+	u32 xec = (mc1_status >> 16) & 0xf;
+
+	pr_emerg(" Instruction Cache Error");
+
+	if (xec == 1 && TLB_ERROR(ec))
+		pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
+	else if (xec == 0) {
+		if (TLB_ERROR(ec))
+			pr_cont(": %s TLB Parity error.\n", LL_MSG(ec));
+		else if (BUS_ERROR(ec)) {
+			if (boot_cpu_data.x86 == 0xf &&
+			    (mc1_status & (1ULL << 58)))
+				pr_cont(" during system linefill.\n");
+			else
+				pr_cont(" during attempted NB data read.\n");
+		} else if (MEM_ERROR(ec)) {
+			u8 ll   = ec & 0x3;
+			u8 rrrr = (ec >> 4) & 0xf;
+
+			if (ll == 0x2)
+				pr_cont(" during a linefill from L2.\n");
+			else if (ll == 0x1) {
+
+				switch (rrrr) {
+				case 0x5:
+					pr_cont(": Parity error during "
+					       "data load.\n");
+					break;
+
+				case 0x7:
+					pr_cont(": Copyback Parity/Victim"
+						" error.\n");
+					break;
+
+				case 0x8:
+					pr_cont(": Tag Snoop error.\n");
+					break;
+
+				default:
+					goto wrong_ic_mce;
+				}
+			}
+		} else
+			goto wrong_ic_mce;
+	} else
+		goto wrong_ic_mce;
+
+	return;
+
+wrong_ic_mce:
+	pr_warning("Corrupted IC MCE info?\n");
+}
+
+static void amd_decode_bu_mce(u64 mc2_status)
+{
+	u32 ec = mc2_status & 0xffff;
+	u32 xec = (mc2_status >> 16) & 0xf;
+
+	pr_emerg(" Bus Unit Error");
+
+	if (xec == 0x1)
+		pr_cont(" in the write data buffers.\n");
+	else if (xec == 0x3)
+		pr_cont(" in the victim data buffers.\n");
+	else if (xec == 0x2 && MEM_ERROR(ec))
+		pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
+	else if (xec == 0x0) {
+		if (TLB_ERROR(ec))
+			pr_cont(": %s error in a Page Descriptor Cache or "
+				"Guest TLB.\n", TT_MSG(ec));
+		else if (BUS_ERROR(ec))
+			pr_cont(": %s/ECC error in data read from NB: %s.\n",
+				RRRR_MSG(ec), PP_MSG(ec));
+		else if (MEM_ERROR(ec)) {
+			u8 rrrr = (ec >> 4) & 0xf;
+
+			if (rrrr >= 0x7)
+				pr_cont(": %s error during data copyback.\n",
+					RRRR_MSG(ec));
+			else if (rrrr <= 0x1)
+				pr_cont(": %s parity/ECC error during data "
+					"access from L2.\n", RRRR_MSG(ec));
+			else
+				goto wrong_bu_mce;
+		} else
+			goto wrong_bu_mce;
+	} else
+		goto wrong_bu_mce;
+
+	return;
+
+wrong_bu_mce:
+	pr_warning("Corrupted BU MCE info?\n");
+}
+
+static void amd_decode_ls_mce(u64 mc3_status)
+{
+	u32 ec  = mc3_status & 0xffff;
+	u32 xec = (mc3_status >> 16) & 0xf;
+
+	pr_emerg(" Load Store Error");
+
+	if (xec == 0x0) {
+		u8 rrrr = (ec >> 4) & 0xf;
+
+		if (!BUS_ERROR(ec) || (rrrr != 0x3 && rrrr != 0x4))
+			goto wrong_ls_mce;
+
+		pr_cont(" during %s.\n", RRRR_MSG(ec));
+	}
+	return;
+
+wrong_ls_mce:
+	pr_warning("Corrupted LS MCE info?\n");
+}
+
+void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
+{
+	u32 ec  = ERROR_CODE(regs->nbsl);
+	u32 xec = EXT_ERROR_CODE(regs->nbsl);
+
+	if (!handle_errors)
+		return;
+
+	pr_emerg(" Northbridge Error, node %d", node_id);
+
+	/*
+	 * F10h, revD and later can disable ErrCpu[3:0]; check the valid bit
+	 * first. The value encoding has also changed, so interpret the field
+	 * differently on those parts.
+	 */
+	if ((boot_cpu_data.x86 == 0x10) &&
+	    (boot_cpu_data.x86_model > 8)) {
+		if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
+			pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
+	} else {
+		u8 assoc_cpus = regs->nbsh & 0xf;
+
+		if (assoc_cpus > 0)
+			pr_cont(", core: %d", fls(assoc_cpus) - 1);
+
+		pr_cont("\n");
+	}
+
+	pr_emerg("%s.\n", EXT_ERR_MSG(xec));
+
+	if (BUS_ERROR(ec) && nb_bus_decoder)
+		nb_bus_decoder(node_id, regs);
+}
+EXPORT_SYMBOL_GPL(amd_decode_nb_mce);
+
+static void amd_decode_fr_mce(u64 mc5_status)
+{
+	/* we have only one error signature so match all fields at once. */
+	if ((mc5_status & 0xffff) == 0x0f0f)
+		pr_emerg(" FR Error: CPU Watchdog timer expire.\n");
+	else
+		pr_warning("Corrupted FR MCE info?\n");
+}
+
+static inline void amd_decode_err_code(unsigned int ec)
+{
+	if (TLB_ERROR(ec)) {
+		/*
+		 * GART errors are intended to help graphics driver developers
+		 * to detect bad GART PTEs. It is recommended by AMD to disable
+		 * GART table walk error reporting by default[1] (currently
+		 * being disabled in mce_cpu_quirks()) and according to the
+		 * comment in mce_cpu_quirks(), such GART errors can be
+		 * incorrectly triggered. We may see these errors anyway and,
+		 * unless requested by the user, they won't be reported.
+		 *
+		 * [1] section 13.10.1 on BIOS and Kernel Developers Guide for
+		 *     AMD NPT family 0Fh processors
+		 */
+		if (!report_gart_errors)
+			return;
+
+		pr_emerg(" Transaction: %s, Cache Level %s\n",
+			 TT_MSG(ec), LL_MSG(ec));
+	} else if (MEM_ERROR(ec)) {
+		pr_emerg(" Transaction: %s, Type: %s, Cache Level: %s",
+			 RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
+	} else if (BUS_ERROR(ec)) {
+		pr_emerg(" Transaction type: %s(%s), %s, Cache Level: %s, "
+			 "Participating Processor: %s\n",
+			  RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
+			  PP_MSG(ec));
+	} else
+		pr_warning("Huh? Unknown MCE error 0x%x\n", ec);
+}
+
+static void amd_decode_mce(struct mce *m)
+{
+	struct err_regs regs;
+	int node, ecc;
+
+	pr_emerg("MC%d_STATUS: ", m->bank);
+
+	pr_cont("%sorrected error, report: %s, MiscV: %svalid, "
+		 "CPU context corrupt: %s",
+		 ((m->status & MCI_STATUS_UC) ? "Unc"  : "C"),
+		 ((m->status & MCI_STATUS_EN) ? "yes"  : "no"),
+		 ((m->status & MCI_STATUS_MISCV) ? ""  : "in"),
+		 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
+
+	/* handle NBSH bits [14:13] (CECC/UECC, status bits [46:45]) together */
+	ecc = (m->status >> 45) & 0x3;
+	if (ecc)
+		pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
+
+	pr_cont("\n");
+
+	switch (m->bank) {
+	case 0:
+		amd_decode_dc_mce(m->status);
+		break;
+
+	case 1:
+		amd_decode_ic_mce(m->status);
+		break;
+
+	case 2:
+		amd_decode_bu_mce(m->status);
+		break;
+
+	case 3:
+		amd_decode_ls_mce(m->status);
+		break;
+
+	case 4:
+		regs.nbsl  = (u32) m->status;
+		regs.nbsh  = (u32)(m->status >> 32);
+		regs.nbeal = (u32) m->addr;
+		regs.nbeah = (u32)(m->addr >> 32);
+		node       = amd_get_nb_id(m->extcpu);
+
+		amd_decode_nb_mce(node, &regs, 1);
+		break;
+
+	case 5:
+		amd_decode_fr_mce(m->status);
+		break;
+
+	default:
+		break;
+	}
+
+	amd_decode_err_code(m->status & 0xffff);
+}
+
+static int __init mce_amd_init(void)
+{
+	/*
+	 * We can decode MCEs for Opteron and later CPUs:
+	 */
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
+	    (boot_cpu_data.x86 >= 0xf)) {
+		/* save the default MCE decode callback */
+		orig_mce_callback = x86_mce_decode_callback;
+
+		x86_mce_decode_callback = amd_decode_mce;
+	}
+
+	return 0;
+}
+early_initcall(mce_amd_init);
+
+#ifdef MODULE
+static void __exit mce_amd_exit(void)
+{
+	x86_mce_decode_callback = orig_mce_callback;
+}
+
+MODULE_DESCRIPTION("AMD MCE decoder");
+MODULE_ALIAS("edac-mce-amd");
+MODULE_LICENSE("GPL");
+module_exit(mce_amd_exit);
+#endif
diff --git a/drivers/edac/edac_mce_amd.h b/drivers/edac/edac_mce_amd.h
new file mode 100644
index 0000000..df23ee0
--- /dev/null
+++ b/drivers/edac/edac_mce_amd.h
@@ -0,0 +1,69 @@
+#ifndef _EDAC_MCE_AMD_H
+#define _EDAC_MCE_AMD_H
+
+#include <asm/mce.h>
+
+#define ERROR_CODE(x)			((x) & 0xffff)
+#define EXT_ERROR_CODE(x)		(((x) >> 16) & 0x1f)
+#define EXT_ERR_MSG(x)			ext_msgs[EXT_ERROR_CODE(x)]
+
+#define LOW_SYNDROME(x)			(((x) >> 15) & 0xff)
+#define HIGH_SYNDROME(x)		(((x) >> 24) & 0xff)
+
+#define TLB_ERROR(x)			(((x) & 0xFFF0) == 0x0010)
+#define MEM_ERROR(x)			(((x) & 0xFF00) == 0x0100)
+#define BUS_ERROR(x)			(((x) & 0xF800) == 0x0800)
+
+#define TT(x)				(((x) >> 2) & 0x3)
+#define TT_MSG(x)			tt_msgs[TT(x)]
+#define II(x)				(((x) >> 2) & 0x3)
+#define II_MSG(x)			ii_msgs[II(x)]
+#define LL(x)				(((x) >> 0) & 0x3)
+#define LL_MSG(x)			ll_msgs[LL(x)]
+#define RRRR(x)				(((x) >> 4) & 0xf)
+#define RRRR_MSG(x)			rrrr_msgs[RRRR(x)]
+#define TO(x)				(((x) >> 8) & 0x1)
+#define TO_MSG(x)			to_msgs[TO(x)]
+#define PP(x)				(((x) >> 9) & 0x3)
+#define PP_MSG(x)			pp_msgs[PP(x)]
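+
+/*
+ * Editorial worked example: for the (hypothetical) memory-format error
+ * code ec = 0x0145, MEM_ERROR(ec) holds and the fields decode as
+ * RRRR(ec) = 4 ("data write"), TT(ec) = 1 ("data") and LL(ec) = 1 ("L1"),
+ * i.e. a data-write error at cache level L1.
+ */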
+
+#define K8_NBSH				0x4C
+
+#define K8_NBSH_VALID_BIT		BIT(31)
+#define K8_NBSH_OVERFLOW		BIT(30)
+#define K8_NBSH_UC_ERR			BIT(29)
+#define K8_NBSH_ERR_EN			BIT(28)
+#define K8_NBSH_MISCV			BIT(27)
+#define K8_NBSH_VALID_ERROR_ADDR	BIT(26)
+#define K8_NBSH_PCC			BIT(25)
+#define K8_NBSH_ERR_CPU_VAL		BIT(24)
+#define K8_NBSH_CECC			BIT(14)
+#define K8_NBSH_UECC			BIT(13)
+#define K8_NBSH_ERR_SCRUBER		BIT(8)
+
+extern const char *tt_msgs[];
+extern const char *ll_msgs[];
+extern const char *rrrr_msgs[];
+extern const char *pp_msgs[];
+extern const char *to_msgs[];
+extern const char *ii_msgs[];
+extern const char *ext_msgs[];
+
+/*
+ * relevant NB regs
+ */
+struct err_regs {
+	u32 nbcfg;
+	u32 nbsh;
+	u32 nbsl;
+	u32 nbeah;
+	u32 nbeal;
+};
+
+void amd_report_gart_errors(bool);
+void amd_register_ecc_decoder(void (*f)(int, struct err_regs *));
+void amd_unregister_ecc_decoder(void (*f)(int, struct err_regs *));
+void amd_decode_nb_mce(int, struct err_regs *, int);
+
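+/*
+ * Editorial sketch (hypothetical decoder, not from the original source):
+ * an ECC-aware MC driver hooks into the NB bus-error path roughly as:
+ *
+ *	static void my_nb_decoder(int node_id, struct err_regs *regs)
+ *	{
+ *		... inspect regs->nbsl/nbsh, map to a csrow, log it ...
+ *	}
+ *
+ *	amd_register_ecc_decoder(my_nb_decoder);
+ *	...
+ *	amd_unregister_ecc_decoder(my_nb_decoder);
+ */
+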
+#endif /* _EDAC_MCE_AMD_H */
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
new file mode 100644
index 0000000..7e1374a
--- /dev/null
+++ b/drivers/edac/edac_module.c
@@ -0,0 +1,222 @@
+/*
+ * edac_module.c
+ *
+ * (C) 2007 www.softwarebitmaker.com
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Author: Doug Thompson <dougthompson@xmission.com>
+ *
+ */
+#include <linux/edac.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_VERSION "Ver: 2.1.0 " __DATE__
+
+#ifdef CONFIG_EDAC_DEBUG
+/* Values of 0 to 4 will generate output */
+int edac_debug_level = 2;
+EXPORT_SYMBOL_GPL(edac_debug_level);
+#endif
+
+/* scope is to module level only */
+struct workqueue_struct *edac_workqueue;
+
+/*
+ * sysfs object: /sys/devices/system/edac
+ *	need to export to other files in this modules
+ */
+static struct sysdev_class edac_class = {
+	.name = "edac",
+};
+static int edac_class_valid;
+
+/*
+ * edac_op_state_to_string()
+ */
+char *edac_op_state_to_string(int opstate)
+{
+	if (opstate == OP_RUNNING_POLL)
+		return "POLLED";
+	else if (opstate == OP_RUNNING_INTERRUPT)
+		return "INTERRUPT";
+	else if (opstate == OP_RUNNING_POLL_INTR)
+		return "POLL-INTR";
+	else if (opstate == OP_ALLOC)
+		return "ALLOC";
+	else if (opstate == OP_OFFLINE)
+		return "OFFLINE";
+
+	return "UNKNOWN";
+}
+
+/*
+ * edac_get_edac_class()
+ *
+ *	return a pointer to the 'edac' sysdev class
+ */
+struct sysdev_class *edac_get_edac_class(void)
+{
+	struct sysdev_class *classptr = NULL;
+
+	if (edac_class_valid)
+		classptr = &edac_class;
+
+	return classptr;
+}
+
+/*
+ * edac_register_sysfs_edac_name()
+ *
+ *	register the 'edac' into /sys/devices/system
+ *
+ * return:
+ *	0  success
+ *	!0 error
+ */
+static int edac_register_sysfs_edac_name(void)
+{
+	int err;
+
+	/* create the /sys/devices/system/edac directory */
+	err = sysdev_class_register(&edac_class);
+
+	if (err) {
+		debugf1("%s() error=%d\n", __func__, err);
+		return err;
+	}
+
+	edac_class_valid = 1;
+	return 0;
+}
+
+/*
+ * edac_unregister_sysfs_edac_name()
+ *
+ *	unregister the 'edac' from /sys/devices/system
+ */
+static void edac_unregister_sysfs_edac_name(void)
+{
+	/* only if currently registered, then unregister it */
+	if (edac_class_valid)
+		sysdev_class_unregister(&edac_class);
+
+	edac_class_valid = 0;
+}
+
+/*
+ * edac_workqueue_setup
+ *	initialize the edac work queue for polling operations
+ */
+static int edac_workqueue_setup(void)
+{
+	edac_workqueue = create_singlethread_workqueue("edac-poller");
+	if (edac_workqueue == NULL)
+		return -ENODEV;
+	else
+		return 0;
+}
+
+/*
+ * edac_workqueue_teardown
+ *	teardown the edac workqueue
+ */
+static void edac_workqueue_teardown(void)
+{
+	if (edac_workqueue) {
+		flush_workqueue(edac_workqueue);
+		destroy_workqueue(edac_workqueue);
+		edac_workqueue = NULL;
+	}
+}
+
+/*
+ * edac_init
+ *      module initialization entry point
+ */
+static int __init edac_init(void)
+{
+	int err = 0;
+
+	edac_printk(KERN_INFO, EDAC_MC, EDAC_VERSION "\n");
+
+	/*
+	 * Harvest and clear any boot/initialization PCI parity errors
+	 *
+	 * FIXME: This only clears errors logged by devices present at time of
+	 *      module initialization.  We should also do an initial clear
+	 *      of each newly hotplugged device.
+	 */
+	edac_pci_clear_parity_errors();
+
+	/*
+	 * perform the registration of the /sys/devices/system/edac class object
+	 */
+	if (edac_register_sysfs_edac_name()) {
+		edac_printk(KERN_ERR, EDAC_MC,
+			"Error initializing 'edac' kobject\n");
+		err = -ENODEV;
+		goto error;
+	}
+
+	/*
+	 * now set up the mc_kset under the edac class object
+	 */
+	err = edac_sysfs_setup_mc_kset();
+	if (err)
+		goto sysfs_setup_fail;
+
+	/* Setup/Initialize the workq for this core */
+	err = edac_workqueue_setup();
+	if (err) {
+		edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n");
+		goto workq_fail;
+	}
+
+	return 0;
+
+	/* Error teardown stack */
+workq_fail:
+	edac_sysfs_teardown_mc_kset();
+
+sysfs_setup_fail:
+	edac_unregister_sysfs_edac_name();
+
+error:
+	return err;
+}
+
+/*
+ * edac_exit()
+ *      module exit/termination function
+ */
+static void __exit edac_exit(void)
+{
+	debugf0("%s()\n", __func__);
+
+	/* tear down the various subsystems */
+	edac_workqueue_teardown();
+	edac_sysfs_teardown_mc_kset();
+	edac_unregister_sysfs_edac_name();
+}
+
+/*
+ * Inform the kernel of our entry and exit points
+ */
+module_init(edac_init);
+module_exit(edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Doug Thompson www.softwarebitmaker.com, et al");
+MODULE_DESCRIPTION("Core library routines for EDAC reporting");
+
+/* refer to *_sysfs.c files for parameters that are exported via sysfs */
+
+#ifdef CONFIG_EDAC_DEBUG
+module_param(edac_debug_level, int, 0644);
+MODULE_PARM_DESC(edac_debug_level, "Debug level");
+#endif
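+
+/*
+ * Editorial example: with CONFIG_EDAC_DEBUG set, the verbosity can be
+ * raised at load time (assuming the core is built as the edac_core
+ * module):
+ *
+ *	# modprobe edac_core edac_debug_level=3
+ *
+ * or later via /sys/module/edac_core/parameters/edac_debug_level.
+ */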
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
new file mode 100644
index 0000000..233d479
--- /dev/null
+++ b/drivers/edac/edac_module.h
@@ -0,0 +1,84 @@
+/*
+ * edac_module.h
+ *
+ * For defining functions/data for within the EDAC_CORE module only
+ *
+ * written by Doug Thompson <norsk5@xmission.com>
+ */
+
+#ifndef	__EDAC_MODULE_H__
+#define	__EDAC_MODULE_H__
+
+#include <linux/sysdev.h>
+
+#include "edac_core.h"
+
+/*
+ * INTERNAL EDAC MODULE:
+ * EDAC memory controller sysfs create/remove functions
+ * and setup/teardown functions
+ *
+ * edac_mc objects
+ */
+extern int edac_sysfs_setup_mc_kset(void);
+extern void edac_sysfs_teardown_mc_kset(void);
+extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci);
+extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci);
+extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci);
+extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
+extern int edac_get_log_ue(void);
+extern int edac_get_log_ce(void);
+extern int edac_get_panic_on_ue(void);
+extern int edac_mc_get_log_ue(void);
+extern int edac_mc_get_log_ce(void);
+extern int edac_mc_get_panic_on_ue(void);
+extern int edac_get_poll_msec(void);
+extern int edac_mc_get_poll_msec(void);
+
+extern int edac_device_register_sysfs_main_kobj(
+				struct edac_device_ctl_info *edac_dev);
+extern void edac_device_unregister_sysfs_main_kobj(
+				struct edac_device_ctl_info *edac_dev);
+extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev);
+extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev);
+extern struct sysdev_class *edac_get_edac_class(void);
+
+/* edac core workqueue: single CPU mode */
+extern struct workqueue_struct *edac_workqueue;
+extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+				    unsigned msec);
+extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
+extern void edac_device_reset_delay_period(struct edac_device_ctl_info
+					   *edac_dev, unsigned long value);
+extern void edac_mc_reset_delay_period(int value);
+
+extern void *edac_align_ptr(void *ptr, unsigned size);
+
+/*
+ * EDAC PCI functions
+ */
+#ifdef	CONFIG_PCI
+extern void edac_pci_do_parity_check(void);
+extern void edac_pci_clear_parity_errors(void);
+extern int edac_sysfs_pci_setup(void);
+extern void edac_sysfs_pci_teardown(void);
+extern int edac_pci_get_check_errors(void);
+extern int edac_pci_get_poll_msec(void);
+extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
+extern void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg);
+extern void edac_pci_handle_npe(struct edac_pci_ctl_info *pci,
+				const char *msg);
+#else				/* CONFIG_PCI */
+/* pre-process these away */
+#define edac_pci_do_parity_check()
+#define edac_pci_clear_parity_errors()
+#define edac_sysfs_pci_setup()  (0)
+#define edac_sysfs_pci_teardown()
+#define edac_pci_get_check_errors()	(0)
+#define edac_pci_get_poll_msec()	(0)
+#define edac_pci_handle_pe(pci, msg)
+#define edac_pci_handle_npe(pci, msg)
+#endif				/* CONFIG_PCI */
+
+#endif				/* __EDAC_MODULE_H__ */
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
new file mode 100644
index 0000000..efb5d56
--- /dev/null
+++ b/drivers/edac/edac_pci.c
@@ -0,0 +1,509 @@
+/*
+ * EDAC PCI component
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+static DEFINE_MUTEX(edac_pci_ctls_mutex);
+static LIST_HEAD(edac_pci_list);
+static atomic_t pci_indexes = ATOMIC_INIT(0);
+
+/*
+ * edac_pci_alloc_ctl_info
+ *
+ *	The alloc() function for the 'edac_pci' control info
+ *	structure. The chip driver will allocate one of these for each
+ *	edac_pci it is going to control/register with the EDAC CORE.
+ */
+struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
+						const char *edac_pci_name)
+{
+	struct edac_pci_ctl_info *pci;
+	void *pvt;
+	unsigned int size;
+
+	debugf1("%s()\n", __func__);
+
+	pci = (struct edac_pci_ctl_info *)0;
+	pvt = edac_align_ptr(&pci[1], sz_pvt);
+	size = ((unsigned long)pvt) + sz_pvt;
+
+	/* Alloc the needed control struct memory */
+	pci = kzalloc(size, GFP_KERNEL);
+	if (pci == NULL)
+		return NULL;
+
+	/* Now compute the private data pointer within the allocation */
+	pvt = sz_pvt ? ((char *)pci) + ((unsigned long)pvt) : NULL;
+
+	pci->pvt_info = pvt;
+	pci->op_state = OP_ALLOC;
+
+	snprintf(pci->name, strlen(edac_pci_name) + 1, "%s", edac_pci_name);
+
+	return pci;
+}
+EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
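+
+/*
+ * Editorial sketch (hypothetical driver, illustrative only): a chipset
+ * driver typically allocates its control struct together with trailing
+ * private data:
+ *
+ *	struct my_pvt {
+ *		u32 saved_cfg;
+ *	};
+ *
+ *	struct edac_pci_ctl_info *pci;
+ *
+ *	pci = edac_pci_alloc_ctl_info(sizeof(struct my_pvt), "my_pci");
+ *	if (pci) {
+ *		struct my_pvt *pvt = pci->pvt_info;
+ *		...
+ *	}
+ */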
+
+/*
+ * edac_pci_free_ctl_info()
+ *
+ *	Last action on the pci control structure.
+ *
+ *	Calls the sysfs removal path, which unregisters this control
+ *	struct's kobj. When that kobj's ref count goes to zero, its
+ *	release function is called, which then kfree()s the memory.
+ */
+void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
+{
+	debugf1("%s()\n", __func__);
+
+	edac_pci_remove_sysfs(pci);
+}
+EXPORT_SYMBOL_GPL(edac_pci_free_ctl_info);
+
+/*
+ * find_edac_pci_by_dev()
+ * 	scans the edac_pci list for a specific 'struct device *'
+ *
+ *	return NULL if not found, or return control struct pointer
+ */
+static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev)
+{
+	struct edac_pci_ctl_info *pci;
+	struct list_head *item;
+
+	debugf1("%s()\n", __func__);
+
+	list_for_each(item, &edac_pci_list) {
+		pci = list_entry(item, struct edac_pci_ctl_info, link);
+
+		if (pci->dev == dev)
+			return pci;
+	}
+
+	return NULL;
+}
+
+/*
+ * add_edac_pci_to_global_list
+ * 	Before calling this function, the caller must assign a unique
+ * 	value to pci->pci_idx.
+ * 	Return:
+ * 		0 on success
+ * 		1 on failure
+ */
+static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
+{
+	struct list_head *item, *insert_before;
+	struct edac_pci_ctl_info *rover;
+
+	debugf1("%s()\n", __func__);
+
+	insert_before = &edac_pci_list;
+
+	/* Determine if already on the list */
+	rover = find_edac_pci_by_dev(pci->dev);
+	if (unlikely(rover != NULL))
+		goto fail0;
+
+	/* Insert in ascending order by 'pci_idx', so find position */
+	list_for_each(item, &edac_pci_list) {
+		rover = list_entry(item, struct edac_pci_ctl_info, link);
+
+		if (rover->pci_idx >= pci->pci_idx) {
+			if (unlikely(rover->pci_idx == pci->pci_idx))
+				goto fail1;
+
+			insert_before = item;
+			break;
+		}
+	}
+
+	list_add_tail_rcu(&pci->link, insert_before);
+	return 0;
+
+fail0:
+	edac_printk(KERN_WARNING, EDAC_PCI,
+		"%s (%s) %s %s already assigned %d\n",
+		dev_name(rover->dev), edac_dev_name(rover),
+		rover->mod_name, rover->ctl_name, rover->pci_idx);
+	return 1;
+
+fail1:
+	edac_printk(KERN_WARNING, EDAC_PCI,
+		"but in low-level driver: attempt to assign\n"
+		"\tduplicate pci_idx %d in %s()\n", rover->pci_idx,
+		__func__);
+	return 1;
+}
+
+/*
+ * complete_edac_pci_list_del
+ *
+ *	RCU completion callback to indicate item is deleted
+ */
+static void complete_edac_pci_list_del(struct rcu_head *head)
+{
+	struct edac_pci_ctl_info *pci;
+
+	pci = container_of(head, struct edac_pci_ctl_info, rcu);
+	INIT_LIST_HEAD(&pci->link);
+}
+
+/*
+ * del_edac_pci_from_global_list
+ *
+ *	remove the PCI control struct from the global list
+ */
+static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
+{
+	list_del_rcu(&pci->link);
+	call_rcu(&pci->rcu, complete_edac_pci_list_del);
+	rcu_barrier();
+}
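+
+/*
+ * Editorial note: writers on edac_pci_list are serialized by
+ * edac_pci_ctls_mutex; a lockless reader would traverse the list under
+ * RCU, e.g.:
+ *
+ *	rcu_read_lock();
+ *	list_for_each_entry_rcu(pci, &edac_pci_list, link)
+ *		...;
+ *	rcu_read_unlock();
+ *
+ * The rcu_barrier() above waits for the pending callback, so the struct
+ * cannot be freed while such a reader still sees it.
+ */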
+
+#if 0
+/* Older code, but might use in the future */
+
+/*
+ * edac_pci_find()
+ * 	Search for an edac_pci_ctl_info structure whose index is 'idx'
+ *
+ * If found, return a pointer to the structure
+ * Else return NULL.
+ *
+ * Caller must hold pci_ctls_mutex.
+ */
+struct edac_pci_ctl_info *edac_pci_find(int idx)
+{
+	struct list_head *item;
+	struct edac_pci_ctl_info *pci;
+
+	/* Iterate over the list, looking for an exact match of the ID */
+	list_for_each(item, &edac_pci_list) {
+		pci = list_entry(item, struct edac_pci_ctl_info, link);
+
+		if (pci->pci_idx >= idx) {
+			if (pci->pci_idx == idx)
+				return pci;
+
+			/* not on list, so terminate early */
+			break;
+		}
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(edac_pci_find);
+#endif
+
+/*
+ * edac_pci_workq_function()
+ *
+ * 	periodic function that performs the operation
+ *	scheduled by a workq request, for a given PCI control struct
+ */
+static void edac_pci_workq_function(struct work_struct *work_req)
+{
+	struct delayed_work *d_work = to_delayed_work(work_req);
+	struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work);
+	int msec;
+	unsigned long delay;
+
+	debugf3("%s() checking\n", __func__);
+
+	mutex_lock(&edac_pci_ctls_mutex);
+
+	if (pci->op_state == OP_RUNNING_POLL) {
+		/* we might be in POLL mode, but there may NOT be a poll func
+		 */
+		if ((pci->edac_check != NULL) && edac_pci_get_check_errors())
+			pci->edac_check(pci);
+
+		/* if we are on a one second period, then use round */
+		msec = edac_pci_get_poll_msec();
+		if (msec == 1000)
+			delay = round_jiffies_relative(msecs_to_jiffies(msec));
+		else
+			delay = msecs_to_jiffies(msec);
+
+		/* Reschedule only if we are in POLL mode */
+		queue_delayed_work(edac_workqueue, &pci->work, delay);
+	}
+
+	mutex_unlock(&edac_pci_ctls_mutex);
+}
+
+/*
+ * edac_pci_workq_setup()
+ * 	initialize a workq item for this edac_pci instance
+ * 	passing in the new delay period in msec
+ *
+ *	locking model:
+ *		called when 'edac_pci_ctls_mutex' is locked
+ */
+static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
+				 unsigned int msec)
+{
+	debugf0("%s()\n", __func__);
+
+	INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
+	queue_delayed_work(edac_workqueue, &pci->work,
+			msecs_to_jiffies(msec));
+}
+
+/*
+ * edac_pci_workq_teardown()
+ * 	stop the workq processing on this edac_pci instance
+ */
+static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
+{
+	int status;
+
+	debugf0("%s()\n", __func__);
+
+	status = cancel_delayed_work(&pci->work);
+	if (status == 0)
+		flush_workqueue(edac_workqueue);
+}
+
+/*
+ * edac_pci_reset_delay_period
+ *
+ *	called with a new period value for the workq period
+ *	a) stop current workq timer
+ *	b) restart workq timer with new value
+ */
+void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
+				 unsigned long value)
+{
+	debugf0("%s()\n", __func__);
+
+	edac_pci_workq_teardown(pci);
+
+	/* need to lock for the setup */
+	mutex_lock(&edac_pci_ctls_mutex);
+
+	edac_pci_workq_setup(pci, value);
+
+	mutex_unlock(&edac_pci_ctls_mutex);
+}
+EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
+
+/*
+ * edac_pci_alloc_index: Allocate a unique PCI index number
+ *
+ * Return:
+ *      allocated index number
+ *
+ */
+int edac_pci_alloc_index(void)
+{
+	return atomic_inc_return(&pci_indexes) - 1;
+}
+EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
+
+/*
+ * edac_pci_add_device: Insert the 'edac_dev' structure into the
+ * edac_pci global list and create sysfs entries associated with
+ * edac_pci structure.
+ * @pci: pointer to the edac_pci structure to be added to the list
+ * @edac_idx: A unique numeric identifier to be assigned to the
+ * 'edac_pci' structure.
+ *
+ * Return:
+ *      0       Success
+ *      !0      Failure
+ */
+int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
+{
+	debugf0("%s()\n", __func__);
+
+	pci->pci_idx = edac_idx;
+	pci->start_time = jiffies;
+
+	mutex_lock(&edac_pci_ctls_mutex);
+
+	if (add_edac_pci_to_global_list(pci))
+		goto fail0;
+
+	if (edac_pci_create_sysfs(pci)) {
+		edac_pci_printk(pci, KERN_WARNING,
+				"failed to create sysfs pci\n");
+		goto fail1;
+	}
+
+	if (pci->edac_check != NULL) {
+		pci->op_state = OP_RUNNING_POLL;
+
+		edac_pci_workq_setup(pci, 1000);
+	} else {
+		pci->op_state = OP_RUNNING_INTERRUPT;
+	}
+
+	edac_pci_printk(pci, KERN_INFO,
+			"Giving out device to module '%s' controller '%s':"
+			" DEV '%s' (%s)\n",
+			pci->mod_name,
+			pci->ctl_name,
+			edac_dev_name(pci), edac_op_state_to_string(pci->op_state));
+
+	mutex_unlock(&edac_pci_ctls_mutex);
+	return 0;
+
+	/* error unwind stack */
+fail1:
+	del_edac_pci_from_global_list(pci);
+fail0:
+	mutex_unlock(&edac_pci_ctls_mutex);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(edac_pci_add_device);
+
+/*
+ * edac_pci_del_device()
+ * 	Remove sysfs entries for specified edac_pci structure and
+ * 	then remove edac_pci structure from global list
+ *
+ * @dev:
+ * 	Pointer to 'struct device' representing edac_pci structure
+ * 	to remove
+ *
+ * Return:
+ * 	Pointer to removed edac_pci structure,
+ * 	or NULL if device not found
+ */
+struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
+{
+	struct edac_pci_ctl_info *pci;
+
+	debugf0("%s()\n", __func__);
+
+	mutex_lock(&edac_pci_ctls_mutex);
+
+	/* ensure the control struct is on the global list
+	 * if not, then leave
+	 */
+	pci = find_edac_pci_by_dev(dev);
+	if (pci == NULL) {
+		mutex_unlock(&edac_pci_ctls_mutex);
+		return NULL;
+	}
+
+	pci->op_state = OP_OFFLINE;
+
+	del_edac_pci_from_global_list(pci);
+
+	mutex_unlock(&edac_pci_ctls_mutex);
+
+	/* stop the workq timer */
+	edac_pci_workq_teardown(pci);
+
+	edac_printk(KERN_INFO, EDAC_PCI,
+		"Removed device %d for %s %s: DEV %s\n",
+		pci->pci_idx, pci->mod_name, pci->ctl_name, edac_dev_name(pci));
+
+	return pci;
+}
+EXPORT_SYMBOL_GPL(edac_pci_del_device);
+
+/*
+ * edac_pci_generic_check
+ *
+ *	a Generic parity check API
+ */
+static void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
+{
+	debugf4("%s()\n", __func__);
+	edac_pci_do_parity_check();
+}
+
+/* free running instance index counter */
+static int edac_pci_idx;
+#define EDAC_PCI_GENCTL_NAME	"EDAC PCI controller"
+
+struct edac_pci_gen_data {
+	int edac_idx;
+};
+
+/*
+ * edac_pci_create_generic_ctl
+ *
+ *	A generic constructor for a PCI parity polling device
+ *	Some systems have more than one domain of PCI buses.
+ *	For systems with one domain, this API provides a
+ *	generic poller.
+ *
+ *	This routine calls the edac_pci_alloc_ctl_info() for
+ *	the generic device, with default values
+ */
+struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
+						const char *mod_name)
+{
+	struct edac_pci_ctl_info *pci;
+	struct edac_pci_gen_data *pdata;
+
+	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), EDAC_PCI_GENCTL_NAME);
+	if (!pci)
+		return NULL;
+
+	pdata = pci->pvt_info;
+	pci->dev = dev;
+	dev_set_drvdata(pci->dev, pci);
+	pci->dev_name = pci_name(to_pci_dev(dev));
+
+	pci->mod_name = mod_name;
+	pci->ctl_name = EDAC_PCI_GENCTL_NAME;
+	pci->edac_check = edac_pci_generic_check;
+
+	pdata->edac_idx = edac_pci_idx++;
+
+	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+		debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+		edac_pci_free_ctl_info(pci);
+		return NULL;
+	}
+
+	return pci;
+}
+EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
+
+/*
+ * edac_pci_release_generic_ctl
+ *
+ *	The release function of a generic EDAC PCI polling device
+ */
+void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
+{
+	debugf0("%s() pci mod=%s\n", __func__, pci->mod_name);
+
+	edac_pci_del_device(pci->dev);
+	edac_pci_free_ctl_info(pci);
+}
+EXPORT_SYMBOL_GPL(edac_pci_release_generic_ctl);
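+
+/*
+ * Editorial sketch (hypothetical probe/remove pair, illustrative only):
+ * most chipset drivers simply hand their struct device to the generic
+ * poller and release it on teardown:
+ *
+ *	priv->edac_pci = edac_pci_create_generic_ctl(&pdev->dev, "my_mod");
+ *	if (!priv->edac_pci)
+ *		printk(KERN_WARNING "PCI parity polling unavailable\n");
+ *	...
+ *	if (priv->edac_pci)
+ *		edac_pci_release_generic_ctl(priv->edac_pci);
+ */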
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
new file mode 100644
index 0000000..422728c
--- /dev/null
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -0,0 +1,767 @@
+/*
+ * (C) 2005, 2006 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+/* Turn off this whole feature if PCI is not configured */
+#ifdef CONFIG_PCI
+
+#define EDAC_PCI_SYMLINK	"device"
+
+/* data variables exported via sysfs */
+static int check_pci_errors;		/* default NO check PCI parity */
+static int edac_pci_panic_on_pe;	/* default NO panic on PCI Parity */
+static int edac_pci_log_pe = 1;		/* log PCI parity errors */
+static int edac_pci_log_npe = 1;	/* log PCI non-parity errors */
+static int edac_pci_poll_msec = 1000;	/* one second workq period */
+
+static atomic_t pci_parity_count = ATOMIC_INIT(0);
+static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
+
+static struct kobject *edac_pci_top_main_kobj;
+static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
+
+/* getter functions for the data variables */
+int edac_pci_get_check_errors(void)
+{
+	return check_pci_errors;
+}
+
+static int edac_pci_get_log_pe(void)
+{
+	return edac_pci_log_pe;
+}
+
+static int edac_pci_get_log_npe(void)
+{
+	return edac_pci_log_npe;
+}
+
+static int edac_pci_get_panic_on_pe(void)
+{
+	return edac_pci_panic_on_pe;
+}
+
+int edac_pci_get_poll_msec(void)
+{
+	return edac_pci_poll_msec;
+}
+
+/**************************** EDAC PCI sysfs instance *******************/
+static ssize_t instance_pe_count_show(struct edac_pci_ctl_info *pci, char *data)
+{
+	return sprintf(data, "%u\n", atomic_read(&pci->counters.pe_count));
+}
+
+static ssize_t instance_npe_count_show(struct edac_pci_ctl_info *pci,
+				char *data)
+{
+	return sprintf(data, "%u\n", atomic_read(&pci->counters.npe_count));
+}
+
+#define to_instance(k) container_of(k, struct edac_pci_ctl_info, kobj)
+#define to_instance_attr(a) container_of(a, struct instance_attribute, attr)
+
+/* DEVICE instance kobject release() function */
+static void edac_pci_instance_release(struct kobject *kobj)
+{
+	struct edac_pci_ctl_info *pci;
+
+	debugf0("%s()\n", __func__);
+
+	/* Form pointer to containing struct, the pci control struct */
+	pci = to_instance(kobj);
+
+	/* decrement reference count on top main kobj */
+	kobject_put(edac_pci_top_main_kobj);
+
+	kfree(pci);	/* Free the control struct */
+}
+
+/* instance specific attribute structure */
+struct instance_attribute {
+	struct attribute attr;
+	ssize_t(*show) (struct edac_pci_ctl_info *, char *);
+	ssize_t(*store) (struct edac_pci_ctl_info *, const char *, size_t);
+};
+
+/* Function to 'show' fields from the edac_pci 'instance' structure */
+static ssize_t edac_pci_instance_show(struct kobject *kobj,
+				struct attribute *attr, char *buffer)
+{
+	struct edac_pci_ctl_info *pci = to_instance(kobj);
+	struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+	if (instance_attr->show)
+		return instance_attr->show(pci, buffer);
+	return -EIO;
+}
+
+/* Function to 'store' fields into the edac_pci 'instance' structure */
+static ssize_t edac_pci_instance_store(struct kobject *kobj,
+				struct attribute *attr,
+				const char *buffer, size_t count)
+{
+	struct edac_pci_ctl_info *pci = to_instance(kobj);
+	struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+	if (instance_attr->store)
+		return instance_attr->store(pci, buffer, count);
+	return -EIO;
+}
+
+/* fs_ops table */
+static struct sysfs_ops pci_instance_ops = {
+	.show = edac_pci_instance_show,
+	.store = edac_pci_instance_store
+};
+
+#define INSTANCE_ATTR(_name, _mode, _show, _store)	\
+static struct instance_attribute attr_instance_##_name = {	\
+	.attr	= {.name = __stringify(_name), .mode = _mode },	\
+	.show	= _show,					\
+	.store	= _store,					\
+};
+
+INSTANCE_ATTR(pe_count, S_IRUGO, instance_pe_count_show, NULL);
+INSTANCE_ATTR(npe_count, S_IRUGO, instance_npe_count_show, NULL);
+
+/* pci instance attributes */
+static struct instance_attribute *pci_instance_attr[] = {
+	&attr_instance_pe_count,
+	&attr_instance_npe_count,
+	NULL
+};
+
+/* the ktype for a pci instance */
+static struct kobj_type ktype_pci_instance = {
+	.release = edac_pci_instance_release,
+	.sysfs_ops = &pci_instance_ops,
+	.default_attrs = (struct attribute **)pci_instance_attr,
+};
+
+/*
+ * edac_pci_create_instance_kobj
+ *
+ *	construct one EDAC PCI instance's kobject for use
+ */
+static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
+{
+	struct kobject *main_kobj;
+	int err;
+
+	debugf0("%s()\n", __func__);
+
+	/* First bump the ref count on the top main kobj, which will
+	 * track the number of PCI instances we have, and thus nest
+	 * properly, keeping the module loaded while instances exist
+	 */
+	main_kobj = kobject_get(edac_pci_top_main_kobj);
+	if (!main_kobj) {
+		err = -ENODEV;
+		goto error_out;
+	}
+
+	/* And now register this new kobject under the main kobj */
+	err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
+				   edac_pci_top_main_kobj, "pci%d", idx);
+	if (err != 0) {
+		debugf2("%s() failed to register instance pci%d\n",
+			__func__, idx);
+		kobject_put(edac_pci_top_main_kobj);
+		goto error_out;
+	}
+
+	kobject_uevent(&pci->kobj, KOBJ_ADD);
+	debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx);
+
+	return 0;
+
+	/* Error unwind stack */
+error_out:
+	return err;
+}
+
+/*
+ * edac_pci_unregister_sysfs_instance_kobj
+ *
+ *	unregister the kobj for the EDAC PCI instance
+ */
+static void edac_pci_unregister_sysfs_instance_kobj(
+			struct edac_pci_ctl_info *pci)
+{
+	debugf0("%s()\n", __func__);
+
+	/* Unregister the instance kobject and allow its release
+	 * function to drop the main reference count and then
+	 * kfree the memory
+	 */
+	kobject_put(&pci->kobj);
+}
+
+/***************************** EDAC PCI sysfs root **********************/
+#define to_edacpci(k) container_of(k, struct edac_pci_ctl_info, kobj)
+#define to_edacpci_attr(a) container_of(a, struct edac_pci_attr, attr)
+
+/* simple show/store functions for attributes */
+static ssize_t edac_pci_int_show(void *ptr, char *buffer)
+{
+	int *value = ptr;
+	return sprintf(buffer, "%d\n", *value);
+}
+
+static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
+{
+	int *value = ptr;
+
+	if (isdigit(*buffer))
+		*value = simple_strtoul(buffer, NULL, 0);
+
+	return count;
+}
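+
+/*
+ * Editorial example: these integer knobs are driven from user space,
+ * e.g. to enable the PCI parity scan and read the running error count:
+ *
+ *	# echo 1 > /sys/devices/system/edac/pci/check_pci_errors
+ *	# cat /sys/devices/system/edac/pci/pci_parity_count
+ */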
+
+struct edac_pci_dev_attribute {
+	struct attribute attr;
+	void *value;
+	 ssize_t(*show) (void *, char *);
+	 ssize_t(*store) (void *, const char *, size_t);
+};
+
+/* Set of show/store abstract level functions for PCI Parity object */
+static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
+				 char *buffer)
+{
+	struct edac_pci_dev_attribute *edac_pci_dev;
+	edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+	if (edac_pci_dev->show)
+		return edac_pci_dev->show(edac_pci_dev->value, buffer);
+	return -EIO;
+}
+
+static ssize_t edac_pci_dev_store(struct kobject *kobj,
+				struct attribute *attr, const char *buffer,
+				size_t count)
+{
+	struct edac_pci_dev_attribute *edac_pci_dev;
+	edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+	if (edac_pci_dev->store)
+		return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
+	return -EIO;
+}
+
+static struct sysfs_ops edac_pci_sysfs_ops = {
+	.show = edac_pci_dev_show,
+	.store = edac_pci_dev_store
+};
+
+#define EDAC_PCI_ATTR(_name,_mode,_show,_store)			\
+static struct edac_pci_dev_attribute edac_pci_attr_##_name = {		\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.value  = &_name,					\
+	.show   = _show,					\
+	.store  = _store,					\
+};
+
+#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store)	\
+static struct edac_pci_dev_attribute edac_pci_attr_##_name = {		\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.value  = _data,					\
+	.show   = _show,					\
+	.store  = _store,					\
+};
+
+/* PCI Parity control files */
+EDAC_PCI_ATTR(check_pci_errors, S_IRUGO | S_IWUSR, edac_pci_int_show,
+	edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_log_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+	edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_log_npe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+	edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_panic_on_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+	edac_pci_int_store);
+EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
+EDAC_PCI_ATTR(pci_nonparity_count, S_IRUGO, edac_pci_int_show, NULL);
+
+/* Base Attributes of the memory ECC object */
+static struct edac_pci_dev_attribute *edac_pci_attr[] = {
+	&edac_pci_attr_check_pci_errors,
+	&edac_pci_attr_edac_pci_log_pe,
+	&edac_pci_attr_edac_pci_log_npe,
+	&edac_pci_attr_edac_pci_panic_on_pe,
+	&edac_pci_attr_pci_parity_count,
+	&edac_pci_attr_pci_nonparity_count,
+	NULL,
+};
+
+/*
+ * edac_pci_release_main_kobj
+ *
+ *	This release function is called when the reference count to the
+ *	passed kobj goes to zero.
+ *
+ *	This kobj is the 'main' kobject that EDAC PCI instances
+ *	link to, and thus provide for proper nesting counts
+ */
+static void edac_pci_release_main_kobj(struct kobject *kobj)
+{
+	debugf0("%s() here to module_put(THIS_MODULE)\n", __func__);
+
+	kfree(kobj);
+
+	/* last reference to top EDAC PCI kobject has been removed,
+	 * NOW release our ref count on the core module
+	 */
+	module_put(THIS_MODULE);
+}
+
+/* ktype struct for the EDAC PCI main kobj */
+static struct kobj_type ktype_edac_pci_main_kobj = {
+	.release = edac_pci_release_main_kobj,
+	.sysfs_ops = &edac_pci_sysfs_ops,
+	.default_attrs = (struct attribute **)edac_pci_attr,
+};
+
+/**
+ * edac_pci_main_kobj_setup()
+ *
+ *	setup the sysfs for EDAC PCI attributes
+ *	assumes edac_class has already been initialized
+ */
+static int edac_pci_main_kobj_setup(void)
+{
+	int err;
+	struct sysdev_class *edac_class;
+
+	debugf0("%s()\n", __func__);
+
+	/* check and count if we have already created the main kobject */
+	if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1)
+		return 0;
+
+	/* First time, so create the main kobject and its
+	 * controls and attributes
+	 */
+	edac_class = edac_get_edac_class();
+	if (edac_class == NULL) {
+		debugf1("%s() no edac_class\n", __func__);
+		err = -ENODEV;
+		goto decrement_count_fail;
+	}
+
+	/* Bump the reference count on this module to ensure the
+	 * module isn't unloaded until we deconstruct the top
+	 * level main kobj for EDAC PCI
+	 */
+	if (!try_module_get(THIS_MODULE)) {
+		debugf1("%s() try_module_get() failed\n", __func__);
+		err = -ENODEV;
+		goto decrement_count_fail;
+	}
+
+	edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+	if (!edac_pci_top_main_kobj) {
+		debugf1("Failed to allocate\n");
+		err = -ENOMEM;
+		goto kzalloc_fail;
+	}
+
+	/* Instantiate the pci object */
+	err = kobject_init_and_add(edac_pci_top_main_kobj,
+				   &ktype_edac_pci_main_kobj,
+				   &edac_class->kset.kobj, "pci");
+	if (err) {
+		debugf1("Failed to register '.../edac/pci'\n");
+		goto kobject_init_and_add_fail;
+	}
+
+	/* At this point, to 'release' the top level kobject
+	 * for EDAC PCI, then edac_pci_main_kobj_teardown()
+	 * must be used, for resources to be cleaned up properly
+	 */
+	kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD);
+	debugf1("Registered '.../edac/pci' kobject\n");
+
+	return 0;
+
+	/* Error unwind stack */
+kobject_init_and_add_fail:
+	kfree(edac_pci_top_main_kobj);
+
+kzalloc_fail:
+	module_put(THIS_MODULE);
+
+decrement_count_fail:
+	/* if we are on this error exit, there is nothing to tear down */
+	atomic_dec(&edac_pci_sysfs_refcount);
+
+	return err;
+}
+
+/*
+ * edac_pci_main_kobj_teardown()
+ *
+ *	if no longer linked (needed) remove the top level EDAC PCI
+ *	kobject with its controls and attributes
+ */
+static void edac_pci_main_kobj_teardown(void)
+{
+	debugf0("%s()\n", __func__);
+
+	/* Decrement the count and only if no more controller instances
+	 * are connected, perform the unregistration of the top level
+	 * main kobj
+	 */
+	if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
+		debugf0("%s() called kobject_put on main kobj\n",
+			__func__);
+		kobject_put(edac_pci_top_main_kobj);
+	}
+}
+
+/*
+ *
+ * edac_pci_create_sysfs
+ *
+ *	Create the controls/attributes for the specified EDAC PCI device
+ */
+int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
+{
+	int err;
+	struct kobject *edac_kobj = &pci->kobj;
+
+	debugf0("%s() idx=%d\n", __func__, pci->pci_idx);
+
+	/* create the top main EDAC PCI kobject, IF needed */
+	err = edac_pci_main_kobj_setup();
+	if (err)
+		return err;
+
+	/* Create this instance's kobject under the MAIN kobject */
+	err = edac_pci_create_instance_kobj(pci, pci->pci_idx);
+	if (err)
+		goto unregister_cleanup;
+
+	err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK);
+	if (err) {
+		debugf0("%s() sysfs_create_link() returned err= %d\n",
+			__func__, err);
+		goto symlink_fail;
+	}
+
+	return 0;
+
+	/* Error unwind stack */
+symlink_fail:
+	edac_pci_unregister_sysfs_instance_kobj(pci);
+
+unregister_cleanup:
+	edac_pci_main_kobj_teardown();
+
+	return err;
+}
+
+/*
+ * edac_pci_remove_sysfs
+ *
+ *	remove the controls and attributes for this EDAC PCI device
+ */
+void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
+{
+	debugf0("%s() index=%d\n", __func__, pci->pci_idx);
+
+	/* Remove the symlink */
+	sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK);
+
+	/* remove this PCI instance's sysfs entries */
+	edac_pci_unregister_sysfs_instance_kobj(pci);
+
+	/* Call the main unregister function, which will determine
+	 * if this 'pci' is the last instance.
+	 * If it is, the main kobject will be unregistered as a result
+	 */
+	debugf0("%s() calling edac_pci_main_kobj_teardown()\n", __func__);
+	edac_pci_main_kobj_teardown();
+}
+
+/************************ PCI error handling *************************/
+static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
+{
+	int where;
+	u16 status;
+
+	where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
+	pci_read_config_word(dev, where, &status);
+
+	/* If we get back 0xFFFF then we must suspect that the card has been
+	 * pulled but the Linux PCI layer has not yet finished cleaning up.
+	 * We don't want to report on such devices
+	 */
+
+	if (status == 0xFFFF) {
+		u32 sanity;
+
+		pci_read_config_dword(dev, 0, &sanity);
+
+		if (sanity == 0xFFFFFFFF)
+			return 0;
+	}
+
+	status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
+		PCI_STATUS_PARITY;
+
+	if (status)
+		/* reset only the bits we are interested in */
+		pci_write_config_word(dev, where, status);
+
+	return status;
+}
+
+/* Clear any PCI parity errors logged by this device. */
+static void edac_pci_dev_parity_clear(struct pci_dev *dev)
+{
+	u8 header_type;
+
+	debugf0("%s()\n", __func__);
+
+	get_pci_parity_status(dev, 0);
+
+	/* read the device TYPE, looking for bridges */
+	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
+		get_pci_parity_status(dev, 1);
+}
+
+/*
+ *  PCI Parity polling
+ *
+ *	Function to retrieve the current parity status
+ *	and decode it
+ */
+static void edac_pci_dev_parity_test(struct pci_dev *dev)
+{
+	unsigned long flags;
+	u16 status;
+	u8 header_type;
+
+	/* stop any interrupts until we can acquire the status */
+	local_irq_save(flags);
+
+	/* read the STATUS register on this device */
+	status = get_pci_parity_status(dev, 0);
+
+	/* read the device TYPE, looking for bridges */
+	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+	local_irq_restore(flags);
+
+	debugf4("PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
+
+	/* check the status reg for errors on boards NOT marked as broken
+	 * if broken, we cannot trust any of the status bits
+	 */
+	if (status && !dev->broken_parity_status) {
+		if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
+			edac_printk(KERN_CRIT, EDAC_PCI,
+				"Signaled System Error on %s\n",
+				pci_name(dev));
+			atomic_inc(&pci_nonparity_count);
+		}
+
+		if (status & (PCI_STATUS_PARITY)) {
+			edac_printk(KERN_CRIT, EDAC_PCI,
+				"Master Data Parity Error on %s\n",
+				pci_name(dev));
+
+			atomic_inc(&pci_parity_count);
+		}
+
+		if (status & (PCI_STATUS_DETECTED_PARITY)) {
+			edac_printk(KERN_CRIT, EDAC_PCI,
+				"Detected Parity Error on %s\n",
+				pci_name(dev));
+
+			atomic_inc(&pci_parity_count);
+		}
+	}
+
+
+	debugf4("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev_name(&dev->dev));
+
+	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+		/* On bridges, need to examine secondary status register  */
+		status = get_pci_parity_status(dev, 1);
+
+		debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
+
+		/* check the secondary status reg for errors,
+		 * on NOT broken boards
+		 */
+		if (status && !dev->broken_parity_status) {
+			if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
+				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+					"Signaled System Error on %s\n",
+					pci_name(dev));
+				atomic_inc(&pci_nonparity_count);
+			}
+
+			if (status & (PCI_STATUS_PARITY)) {
+				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+					"Master Data Parity Error on "
+					"%s\n", pci_name(dev));
+
+				atomic_inc(&pci_parity_count);
+			}
+
+			if (status & (PCI_STATUS_DETECTED_PARITY)) {
+				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+					"Detected Parity Error on %s\n",
+					pci_name(dev));
+
+				atomic_inc(&pci_parity_count);
+			}
+		}
+	}
+}
+
+/* reduce some complexity in definition of the iterator */
+typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
+
+/*
+ * pci_dev parity list iterator
+ *	Scan the PCI device list in one pass, looking for SERRs,
+ *	Master Parity ERRORs, or Parity ERRORs on primary or secondary devices
+ */
+static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
+{
+	struct pci_dev *dev = NULL;
+
+	/* request for kernel access to the next PCI device, if any,
+	 * and while we are looking at it have its reference count
+	 * bumped until we are done with it
+	 */
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+		fn(dev);
+	}
+}
+
+/*
+ * edac_pci_do_parity_check
+ *
+ *	performs the actual PCI parity check operation
+ */
+void edac_pci_do_parity_check(void)
+{
+	int before_count;
+
+	debugf3("%s()\n", __func__);
+
+	/* if policy has PCI check off, leave now */
+	if (!check_pci_errors)
+		return;
+
+	before_count = atomic_read(&pci_parity_count);
+
+	/* scan all PCI devices looking for a Parity Error on devices and
+	 * bridges.
+	 * The iterator calls pci_get_device() which might sleep, thus
+	 * we cannot disable interrupts in this scan.
+	 */
+	edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
+
+	/* Only if operator has selected panic on PCI Error */
+	if (edac_pci_get_panic_on_pe()) {
+		/* If the count is different 'after' from 'before' */
+		if (before_count != atomic_read(&pci_parity_count))
+			panic("EDAC: PCI Parity Error");
+	}
+}
+
+/*
+ * edac_pci_clear_parity_errors
+ *
+ *	function to perform an iteration over the PCI devices
+ *	and clear their current status
+ */
+void edac_pci_clear_parity_errors(void)
+{
+	/* Clear any PCI bus parity errors that devices initially have logged
+	 * in their registers.
+	 */
+	edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
+}
+
+/*
+ * edac_pci_handle_pe
+ *
+ *	Called to handle a PARITY ERROR event
+ */
+void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg)
+{
+
+	/* global PE counter incremented by edac_pci_do_parity_check() */
+	atomic_inc(&pci->counters.pe_count);
+
+	if (edac_pci_get_log_pe())
+		edac_pci_printk(pci, KERN_WARNING,
+				"Parity Error ctl: %s %d: %s\n",
+				pci->ctl_name, pci->pci_idx, msg);
+
+	/*
+	 * poke all PCI devices and see which one is the troublemaker
+	 * panic() is called if set
+	 */
+	edac_pci_do_parity_check();
+}
+EXPORT_SYMBOL_GPL(edac_pci_handle_pe);
+
+
+/*
+ * edac_pci_handle_npe
+ *
+ *	Called to handle a NON-PARITY ERROR event
+ */
+void edac_pci_handle_npe(struct edac_pci_ctl_info *pci, const char *msg)
+{
+
+	/* global NPE counter incremented by edac_pci_do_parity_check() */
+	atomic_inc(&pci->counters.npe_count);
+
+	if (edac_pci_get_log_npe())
+		edac_pci_printk(pci, KERN_WARNING,
+				"Non-Parity Error ctl: %s %d: %s\n",
+				pci->ctl_name, pci->pci_idx, msg);
+
+	/*
+	 * poke all PCI devices and see which one is the troublemaker
+	 * panic() is called if set
+	 */
+	edac_pci_do_parity_check();
+}
+EXPORT_SYMBOL_GPL(edac_pci_handle_npe);
+
+/*
+ * Define the PCI parameter to the module
+ */
+module_param(check_pci_errors, int, 0644);
+MODULE_PARM_DESC(check_pci_errors,
+		 "Check for PCI bus parity errors: 0=off 1=on");
+module_param(edac_pci_panic_on_pe, int, 0644);
+MODULE_PARM_DESC(edac_pci_panic_on_pe,
+		 "Panic on PCI Bus Parity error: 0=off 1=on");
+
+#endif				/* CONFIG_PCI */
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
new file mode 100644
index 0000000..20b428a
--- /dev/null
+++ b/drivers/edac/edac_stub.c
@@ -0,0 +1,46 @@
+/*
+ * common EDAC components that must be in kernel
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/edac.h>
+#include <asm/atomic.h>
+#include <asm/edac.h>
+
+int edac_op_state = EDAC_OPSTATE_INVAL;
+EXPORT_SYMBOL_GPL(edac_op_state);
+
+atomic_t edac_handlers = ATOMIC_INIT(0);
+EXPORT_SYMBOL_GPL(edac_handlers);
+
+int edac_err_assert = 0;
+EXPORT_SYMBOL_GPL(edac_err_assert);
+
+/*
+ * called to determine if there is an EDAC driver interested in
+ * knowing an event (such as NMI) occurred
+ */
+int edac_handler_set(void)
+{
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		return 0;
+
+	return atomic_read(&edac_handlers);
+}
+EXPORT_SYMBOL_GPL(edac_handler_set);
+
+/*
+ * handler for NMI type of interrupts to assert error
+ */
+void edac_atomic_assert_error(void)
+{
+	edac_err_assert++;
+}
+EXPORT_SYMBOL_GPL(edac_atomic_assert_error);
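+
+/*
+ * A minimal caller sketch (not part of this file): an NMI-context
+ * handler can use these hooks roughly as follows, deferring the real
+ * decode work to the EDAC polling path:
+ *
+ *	if (edac_handler_set()) {
+ *		edac_atomic_assert_error();
+ *		return;
+ *	}
+ */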
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
new file mode 100644
index 0000000..6c9a0f2
--- /dev/null
+++ b/drivers/edac/i3000_edac.c
@@ -0,0 +1,554 @@
+/*
+ * Intel 3000/3010 Memory Controller kernel module
+ * Copyright (C) 2007 Akamai Technologies, Inc.
+ * Shamelessly copied from:
+ * 	Intel D82875P Memory Controller kernel module
+ * 	(C) 2003 Linux Networx (http://lnxi.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define I3000_REVISION		"1.1"
+
+#define EDAC_MOD_STR		"i3000_edac"
+
+#define I3000_RANKS		8
+#define I3000_RANKS_PER_CHANNEL	4
+#define I3000_CHANNELS		2
+
+/* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */
+
+#define I3000_MCHBAR		0x44	/* MCH Memory Mapped Register BAR */
+#define I3000_MCHBAR_MASK	0xffffc000
+#define I3000_MMR_WINDOW_SIZE	16384
+
+#define I3000_EDEAP	0x70	/* Extended DRAM Error Address Pointer (8b)
+				 *
+				 * 7:1   reserved
+				 * 0     bit 32 of address
+				 */
+#define I3000_DEAP	0x58	/* DRAM Error Address Pointer (32b)
+				 *
+				 * 31:7  address
+				 * 6:1   reserved
+				 * 0     Error channel 0/1
+				 */
+#define I3000_DEAP_GRAIN 		(1 << 7)
+
+/*
+ * Helper functions to decode the DEAP/EDEAP hardware registers.
+ *
+ * The type promotion here is deliberate; we're deriving an
+ * unsigned long pfn and offset from hardware regs which are u8/u32.
+ */
+
+static inline unsigned long deap_pfn(u8 edeap, u32 deap)
+{
+	deap >>= PAGE_SHIFT;
+	deap |= (edeap & 1) << (32 - PAGE_SHIFT);
+	return deap;
+}
+
+static inline unsigned long deap_offset(u32 deap)
+{
+	return deap & ~(I3000_DEAP_GRAIN - 1) & ~PAGE_MASK;
+}
+
+static inline int deap_channel(u32 deap)
+{
+	return deap & 1;
+}
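+
+/*
+ * Worked example (assuming PAGE_SHIFT == 12, i.e. 4 KiB pages):
+ * with edeap == 1 and deap == 0x00123481,
+ *
+ *	deap_pfn()     = (0x00123481 >> 12) | (1 << 20) = 0x100123
+ *	deap_offset()  = 0x00123481 & ~0x7f & ~PAGE_MASK = 0x480
+ *	deap_channel() = 0x00123481 & 1 = 1
+ *
+ * i.e. the error lies in page 0x100123 at offset 0x480, on channel 1.
+ */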
+
+#define I3000_DERRSYN	0x5c	/* DRAM Error Syndrome (8b)
+				 *
+				 *  7:0  DRAM ECC Syndrome
+				 */
+
+#define I3000_ERRSTS	0xc8	/* Error Status Register (16b)
+				 *
+				 * 15:12 reserved
+				 * 11    MCH Thermal Sensor Event
+				 *         for SMI/SCI/SERR
+				 * 10    reserved
+				 *  9    LOCK to non-DRAM Memory Flag (LCKF)
+				 *  8    Received Refresh Timeout Flag (RRTOF)
+				 *  7:2  reserved
+				 *  1    Multi-bit DRAM ECC Error Flag (DMERR)
+				 *  0    Single-bit DRAM ECC Error Flag (DSERR)
+				 */
+#define I3000_ERRSTS_BITS	0x0b03	/* bits which indicate errors */
+#define I3000_ERRSTS_UE		0x0002
+#define I3000_ERRSTS_CE		0x0001
+
+#define I3000_ERRCMD	0xca	/* Error Command (16b)
+				 *
+				 * 15:12 reserved
+				 * 11    SERR on MCH Thermal Sensor Event
+				 *         (TSESERR)
+				 * 10    reserved
+				 *  9    SERR on LOCK to non-DRAM Memory
+				 *         (LCKERR)
+				 *  8    SERR on DRAM Refresh Timeout
+				 *         (DRTOERR)
+				 *  7:2  reserved
+				 *  1    SERR Multi-Bit DRAM ECC Error
+				 *         (DMERR)
+				 *  0    SERR on Single-Bit ECC Error
+				 *         (DSERR)
+				 */
+
+/* Intel  MMIO register space - device 0 function 0 - MMR space */
+
+#define I3000_DRB_SHIFT 25	/* 32MiB grain */
+
+#define I3000_C0DRB	0x100	/* Channel 0 DRAM Rank Boundary (8b x 4)
+				 *
+				 * 7:0   Channel 0 DRAM Rank Boundary Address
+				 */
+#define I3000_C1DRB	0x180	/* Channel 1 DRAM Rank Boundary (8b x 4)
+				 *
+				 * 7:0   Channel 1 DRAM Rank Boundary Address
+				 */
+
+#define I3000_C0DRA	0x108	/* Channel 0 DRAM Rank Attribute (8b x 2)
+				 *
+				 * 7     reserved
+				 * 6:4   DRAM odd Rank Attribute
+				 * 3     reserved
+				 * 2:0   DRAM even Rank Attribute
+				 *
+				 * Each attribute defines the page
+				 * size of the corresponding rank:
+				 *     000: unpopulated
+				 *     001: reserved
+				 *     010: 4 KB
+				 *     011: 8 KB
+				 *     100: 16 KB
+				 *     Others: reserved
+				 */
+#define I3000_C1DRA	0x188	/* Channel 1 DRAM Rank Attribute (8b x 2) */
+
+static inline unsigned char odd_rank_attrib(unsigned char dra)
+{
+	return (dra & 0x70) >> 4;
+}
+
+static inline unsigned char even_rank_attrib(unsigned char dra)
+{
+	return dra & 0x07;
+}
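+
+/*
+ * Worked example: a DRA byte of 0x32 decodes as
+ *	odd_rank_attrib()  = (0x32 & 0x70) >> 4 = 3	(011: 8 KB pages)
+ *	even_rank_attrib() =  0x32 & 0x07      = 2	(010: 4 KB pages)
+ * per the attribute encoding table above.
+ */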
+
+#define I3000_C0DRC0	0x120	/* DRAM Controller Mode 0 (32b)
+				 *
+				 * 31:30 reserved
+				 * 29    Initialization Complete (IC)
+				 * 28:11 reserved
+				 * 10:8  Refresh Mode Select (RMS)
+				 * 7     reserved
+				 * 6:4   Mode Select (SMS)
+				 * 3:2   reserved
+				 * 1:0   DRAM Type (DT)
+				 */
+
+#define I3000_C0DRC1	0x124	/* DRAM Controller Mode 1 (32b)
+				 *
+				 * 31    Enhanced Addressing Enable (ENHADE)
+				 * 30:0  reserved
+				 */
+
+enum i3000p_chips {
+	I3000 = 0,
+};
+
+struct i3000_dev_info {
+	const char *ctl_name;
+};
+
+struct i3000_error_info {
+	u16 errsts;
+	u8 derrsyn;
+	u8 edeap;
+	u32 deap;
+	u16 errsts2;
+};
+
+static const struct i3000_dev_info i3000_devs[] = {
+	[I3000] = {
+		.ctl_name = "i3000"},
+};
+
+static struct pci_dev *mci_pdev;
+static int i3000_registered = 1;
+static struct edac_pci_ctl_info *i3000_pci;
+
+static void i3000_get_error_info(struct mem_ctl_info *mci,
+				 struct i3000_error_info *info)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(mci->dev);
+
+	/*
+	 * This is a mess because there is no atomic way to read all the
+	 * registers at once and the registers can transition from CE being
+	 * overwritten by UE.
+	 */
+	pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts);
+	if (!(info->errsts & I3000_ERRSTS_BITS))
+		return;
+	pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
+	pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
+	pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
+	pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2);
+
+	/*
+	 * If the error is the same for both reads then the first set
+	 * of reads is valid.  If there is a change then there is a CE
+	 * with no info and the second set of reads is valid and
+	 * should be UE info.
+	 */
+	if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
+		pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
+		pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
+		pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
+	}
+
+	/*
+	 * Clear any error bits.
+	 * (Yes, we really clear bits by writing 1 to them.)
+	 */
+	pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
+			 I3000_ERRSTS_BITS);
+}
+
+static int i3000_process_error_info(struct mem_ctl_info *mci,
+				struct i3000_error_info *info,
+				int handle_errors)
+{
+	int row, multi_chan, channel;
+	unsigned long pfn, offset;
+
+	multi_chan = mci->csrows[0].nr_channels - 1;
+
+	if (!(info->errsts & I3000_ERRSTS_BITS))
+		return 0;
+
+	if (!handle_errors)
+		return 1;
+
+	if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
+		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		info->errsts = info->errsts2;
+	}
+
+	pfn = deap_pfn(info->edeap, info->deap);
+	offset = deap_offset(info->deap);
+	channel = deap_channel(info->deap);
+
+	row = edac_mc_find_csrow_by_page(mci, pfn);
+
+	if (info->errsts & I3000_ERRSTS_UE)
+		edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
+	else
+		edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
+				multi_chan ? channel : 0, "i3000 CE");
+
+	return 1;
+}
+
+static void i3000_check(struct mem_ctl_info *mci)
+{
+	struct i3000_error_info info;
+
+	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+	i3000_get_error_info(mci, &info);
+	i3000_process_error_info(mci, &info, 1);
+}
+
+static int i3000_is_interleaved(const unsigned char *c0dra,
+				const unsigned char *c1dra,
+				const unsigned char *c0drb,
+				const unsigned char *c1drb)
+{
+	int i;
+
+	/*
+	 * If the channels aren't populated identically then
+	 * we're not interleaved.
+	 */
+	for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
+		if (odd_rank_attrib(c0dra[i]) != odd_rank_attrib(c1dra[i]) ||
+			even_rank_attrib(c0dra[i]) !=
+						even_rank_attrib(c1dra[i]))
+			return 0;
+
+	/*
+	 * If the rank boundaries for the two channels are different
+	 * then we're not interleaved.
+	 */
+	for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
+		if (c0drb[i] != c1drb[i])
+			return 0;
+
+	return 1;
+}
+
+static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	int rc;
+	int i;
+	struct mem_ctl_info *mci = NULL;
+	unsigned long last_cumul_size;
+	int interleaved, nr_channels;
+	unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
+	unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
+	unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL];
+	unsigned long mchbar;
+	void __iomem *window;
+
+	debugf0("MC: %s()\n", __func__);
+
+	pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *)&mchbar);
+	mchbar &= I3000_MCHBAR_MASK;
+	window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
+	if (!window) {
+		printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
+			mchbar);
+		return -ENODEV;
+	}
+
+	c0dra[0] = readb(window + I3000_C0DRA + 0);	/* ranks 0,1 */
+	c0dra[1] = readb(window + I3000_C0DRA + 1);	/* ranks 2,3 */
+	c1dra[0] = readb(window + I3000_C1DRA + 0);	/* ranks 0,1 */
+	c1dra[1] = readb(window + I3000_C1DRA + 1);	/* ranks 2,3 */
+
+	for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) {
+		c0drb[i] = readb(window + I3000_C0DRB + i);
+		c1drb[i] = readb(window + I3000_C1DRB + i);
+	}
+
+	iounmap(window);
+
+	/*
+	 * Figure out how many channels we have.
+	 *
+	 * If we have what the datasheet calls "asymmetric channels"
+	 * (essentially the same as what was called "virtual single
+	 * channel mode" in the i82875) then it's a single channel as
+	 * far as EDAC is concerned.
+	 */
+	interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
+	nr_channels = interleaved ? 2 : 1;
+	mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
+	if (!mci)
+		return -ENOMEM;
+
+	debugf3("MC: %s(): init mci\n", __func__);
+
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR2;
+
+	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_SECDED;
+
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = I3000_REVISION;
+	mci->ctl_name = i3000_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = i3000_check;
+	mci->ctl_page_to_phys = NULL;
+
+	/*
+	 * The dram rank boundary (DRB) reg values are boundary addresses
+	 * for each DRAM rank with a granularity of 32MB.  DRB regs are
+	 * cumulative; the last one will contain the total memory
+	 * contained in all ranks.
+	 *
+	 * If we're in interleaved mode then we're only walking through
+	 * the ranks of controller 0, so we double all the values we see.
+	 */
+	for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
+		u8 value;
+		u32 cumul_size;
+		struct csrow_info *csrow = &mci->csrows[i];
+
+		value = drb[i];
+		cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
+		if (interleaved)
+			cumul_size <<= 1;
+		debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
+			__func__, i, cumul_size);
+		if (cumul_size == last_cumul_size) {
+			csrow->mtype = MEM_EMPTY;
+			continue;
+		}
+
+		csrow->first_page = last_cumul_size;
+		csrow->last_page = cumul_size - 1;
+		csrow->nr_pages = cumul_size - last_cumul_size;
+		last_cumul_size = cumul_size;
+		csrow->grain = I3000_DEAP_GRAIN;
+		csrow->mtype = MEM_DDR2;
+		csrow->dtype = DEV_UNKNOWN;
+		csrow->edac_mode = EDAC_UNKNOWN;
+	}
+
+	/*
+	 * Clear any error bits.
+	 * (Yes, we really clear bits by writing 1 to them.)
+	 */
+	pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
+			 I3000_ERRSTS_BITS);
+
+	rc = -ENODEV;
+	if (edac_mc_add_mc(mci)) {
+		debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail;
+	}
+
+	/* allocating generic PCI control info */
+	i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!i3000_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	/* get this far and it's successful */
+	debugf3("MC: %s(): success\n", __func__);
+	return 0;
+
+fail:
+	if (mci)
+		edac_mc_free(mci);
+
+	return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i3000_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	int rc;
+
+	debugf0("MC: %s()\n", __func__);
+
+	if (pci_enable_device(pdev) < 0)
+		return -EIO;
+
+	rc = i3000_probe1(pdev, ent->driver_data);
+	if (!mci_pdev)
+		mci_pdev = pci_dev_get(pdev);
+
+	return rc;
+}
+
+static void __devexit i3000_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0("%s()\n", __func__);
+
+	if (i3000_pci)
+		edac_pci_release_generic_ctl(i3000_pci);
+
+	mci = edac_mc_del_mc(&pdev->dev);
+	if (!mci)
+		return;
+
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
+	{
+	 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 I3000},
+	{
+	 0,
+	 }			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);
+
+static struct pci_driver i3000_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = i3000_init_one,
+	.remove = __devexit_p(i3000_remove_one),
+	.id_table = i3000_pci_tbl,
+};
+
+static int __init i3000_init(void)
+{
+	int pci_rc;
+
+	debugf3("MC: %s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	pci_rc = pci_register_driver(&i3000_driver);
+	if (pci_rc < 0)
+		goto fail0;
+
+	if (!mci_pdev) {
+		i3000_registered = 0;
+		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+					PCI_DEVICE_ID_INTEL_3000_HB, NULL);
+		if (!mci_pdev) {
+			debugf0("i3000 pci_get_device fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+
+		pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
+		if (pci_rc < 0) {
+			debugf0("i3000 init fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+	}
+
+	return 0;
+
+fail1:
+	pci_unregister_driver(&i3000_driver);
+
+fail0:
+	if (mci_pdev)
+		pci_dev_put(mci_pdev);
+
+	return pci_rc;
+}
+
+static void __exit i3000_exit(void)
+{
+	debugf3("MC: %s()\n", __func__);
+
+	pci_unregister_driver(&i3000_driver);
+	if (!i3000_registered) {
+		i3000_remove_one(mci_pdev);
+		pci_dev_put(mci_pdev);
+	}
+}
+
+module_init(i3000_init);
+module_exit(i3000_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
+MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
new file mode 100644
index 0000000..fde4db9
--- /dev/null
+++ b/drivers/edac/i3200_edac.c
@@ -0,0 +1,527 @@
+/*
+ * Intel 3200/3210 Memory Controller kernel module
+ * Copyright (C) 2008-2009 Akamai Technologies, Inc.
+ * Portions by Hitoshi Mitake <h.mitake@gmail.com>.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include "edac_core.h"
+
+#define I3200_REVISION        "1.1"
+
+#define EDAC_MOD_STR        "i3200_edac"
+
+#define PCI_DEVICE_ID_INTEL_3200_HB    0x29f0
+
+#define I3200_RANKS		8
+#define I3200_RANKS_PER_CHANNEL	4
+#define I3200_CHANNELS		2
+
+/* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */
+
+#define I3200_MCHBAR_LOW	0x48	/* MCH Memory Mapped Register BAR */
+#define I3200_MCHBAR_HIGH	0x4c
+#define I3200_MCHBAR_MASK	0xfffffc000ULL	/* bits 35:14 */
+#define I3200_MMR_WINDOW_SIZE	16384
+
+#define I3200_TOM		0xa0	/* Top of Memory (16b)
+		 *
+		 * 15:10 reserved
+		 *  9:0  total populated physical memory
+		 */
+#define I3200_TOM_MASK		0x3ff	/* bits 9:0 */
+#define I3200_TOM_SHIFT		26	/* 64MiB grain */
+
+#define I3200_ERRSTS		0xc8	/* Error Status Register (16b)
+		 *
+		 * 15    reserved
+		 * 14    Isochronous TBWRR Run Behind FIFO Full
+		 *       (ITCV)
+		 * 13    Isochronous TBWRR Run Behind FIFO Put
+		 *       (ITSTV)
+		 * 12    reserved
+		 * 11    MCH Thermal Sensor Event
+		 *       for SMI/SCI/SERR (GTSE)
+		 * 10    reserved
+		 *  9    LOCK to non-DRAM Memory Flag (LCKF)
+		 *  8    reserved
+		 *  7    DRAM Throttle Flag (DTF)
+		 *  6:2  reserved
+		 *  1    Multi-bit DRAM ECC Error Flag (DMERR)
+		 *  0    Single-bit DRAM ECC Error Flag (DSERR)
+		 */
+#define I3200_ERRSTS_UE		0x0002
+#define I3200_ERRSTS_CE		0x0001
+#define I3200_ERRSTS_BITS	(I3200_ERRSTS_UE | I3200_ERRSTS_CE)
+
+
+/* Intel  MMIO register space - device 0 function 0 - MMR space */
+
+#define I3200_C0DRB	0x200	/* Channel 0 DRAM Rank Boundary (16b x 4)
+		 *
+		 * 15:10 reserved
+		 *  9:0  Channel 0 DRAM Rank Boundary Address
+		 */
+#define I3200_C1DRB	0x600	/* Channel 1 DRAM Rank Boundary (16b x 4) */
+#define I3200_DRB_MASK	0x3ff	/* bits 9:0 */
+#define I3200_DRB_SHIFT	26	/* 64MiB grain */
+
+#define I3200_C0ECCERRLOG	0x280	/* Channel 0 ECC Error Log (64b)
+		 *
+		 * 63:48 Error Column Address (ERRCOL)
+		 * 47:32 Error Row Address (ERRROW)
+		 * 31:29 Error Bank Address (ERRBANK)
+		 * 28:27 Error Rank Address (ERRRANK)
+		 * 26:24 reserved
+		 * 23:16 Error Syndrome (ERRSYND)
+		 * 15: 2 reserved
+		 *    1  Multiple Bit Error Status (MERRSTS)
+		 *    0  Correctable Error Status (CERRSTS)
+		 */
+#define I3200_C1ECCERRLOG		0x680	/* Chan 1 ECC Error Log (64b) */
+#define I3200_ECCERRLOG_CE		0x1
+#define I3200_ECCERRLOG_UE		0x2
+#define I3200_ECCERRLOG_RANK_BITS	0x18000000
+#define I3200_ECCERRLOG_RANK_SHIFT	27
+#define I3200_ECCERRLOG_SYNDROME_BITS	0xff0000
+#define I3200_ECCERRLOG_SYNDROME_SHIFT	16
+#define I3200_CAPID0			0xe0	/* P.95 of spec for details */
+
+struct i3200_priv {
+	void __iomem *window;
+};
+
+static int nr_channels;
+
+static int how_many_channels(struct pci_dev *pdev)
+{
+	unsigned char capid0_8b; /* 8th byte of CAPID0 */
+
+	pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
+	if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
+		debugf0("In single channel mode.\n");
+		return 1;
+	} else {
+		debugf0("In dual channel mode.\n");
+		return 2;
+	}
+}
+
+static unsigned long eccerrlog_syndrome(u64 log)
+{
+	return (log & I3200_ECCERRLOG_SYNDROME_BITS) >>
+		I3200_ECCERRLOG_SYNDROME_SHIFT;
+}
+
+static int eccerrlog_row(int channel, u64 log)
+{
+	u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >>
+		I3200_ECCERRLOG_RANK_SHIFT);
+	return rank | (channel * I3200_RANKS_PER_CHANNEL);
+}
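+
+/*
+ * Worked example: an ECCERRLOG value of 0x18ab0001 on channel 1
+ * decodes as rank (0x18ab0001 & 0x18000000) >> 27 = 3, so
+ * eccerrlog_row() = 3 | (1 * 4) = 7, with
+ * eccerrlog_syndrome() = (0x18ab0001 & 0xff0000) >> 16 = 0xab,
+ * and bit 0 set marks it a correctable error (CERRSTS).
+ */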
+
+enum i3200_chips {
+	I3200 = 0,
+};
+
+struct i3200_dev_info {
+	const char *ctl_name;
+};
+
+struct i3200_error_info {
+	u16 errsts;
+	u16 errsts2;
+	u64 eccerrlog[I3200_CHANNELS];
+};
+
+static const struct i3200_dev_info i3200_devs[] = {
+	[I3200] = {
+		.ctl_name = "i3200"
+	},
+};
+
+static struct pci_dev *mci_pdev;
+static int i3200_registered = 1;
+
+
+static void i3200_clear_error_info(struct mem_ctl_info *mci)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(mci->dev);
+
+	/*
+	 * Clear any error bits.
+	 * (Yes, we really clear bits by writing 1 to them.)
+	 */
+	pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS,
+		I3200_ERRSTS_BITS);
+}
+
+static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
+		struct i3200_error_info *info)
+{
+	struct pci_dev *pdev;
+	struct i3200_priv *priv = mci->pvt_info;
+	void __iomem *window = priv->window;
+
+	pdev = to_pci_dev(mci->dev);
+
+	/*
+	 * This is a mess because there is no atomic way to read all the
+	 * registers at once and the registers can transition from CE being
+	 * overwritten by UE.
+	 */
+	pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts);
+	if (!(info->errsts & I3200_ERRSTS_BITS))
+		return;
+
+	info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
+	if (nr_channels == 2)
+		info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
+
+	pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2);
+
+	/*
+	 * If the error is the same for both reads then the first set
+	 * of reads is valid.  If there is a change then there is a CE
+	 * with no info and the second set of reads is valid and
+	 * should be UE info.
+	 */
+	if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
+		info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
+		if (nr_channels == 2)
+			info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
+	}
+
+	i3200_clear_error_info(mci);
+}
+
+static void i3200_process_error_info(struct mem_ctl_info *mci,
+		struct i3200_error_info *info)
+{
+	int channel;
+	u64 log;
+
+	if (!(info->errsts & I3200_ERRSTS_BITS))
+		return;
+
+	if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
+		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		info->errsts = info->errsts2;
+	}
+
+	for (channel = 0; channel < nr_channels; channel++) {
+		log = info->eccerrlog[channel];
+		if (log & I3200_ECCERRLOG_UE) {
+			edac_mc_handle_ue(mci, 0, 0,
+				eccerrlog_row(channel, log),
+				"i3200 UE");
+		} else if (log & I3200_ECCERRLOG_CE) {
+			edac_mc_handle_ce(mci, 0, 0,
+				eccerrlog_syndrome(log),
+				eccerrlog_row(channel, log), 0,
+				"i3200 CE");
+		}
+	}
+}
+
+static void i3200_check(struct mem_ctl_info *mci)
+{
+	struct i3200_error_info info;
+
+	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+	i3200_get_and_clear_error_info(mci, &info);
+	i3200_process_error_info(mci, &info);
+}
+
+
+static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
+{
+	union {
+		u64 mchbar;
+		struct {
+			u32 mchbar_low;
+			u32 mchbar_high;
+		};
+	} u;
+	void __iomem *window;
+
+	pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low);
+	pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high);
+	u.mchbar &= I3200_MCHBAR_MASK;
+
+	if (u.mchbar != (resource_size_t)u.mchbar) {
+		printk(KERN_ERR
+			"i3200: mmio space beyond accessible range (0x%llx)\n",
+			(unsigned long long)u.mchbar);
+		return NULL;
+	}
+
+	window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
+	if (!window)
+		printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
+			(unsigned long long)u.mchbar);
+
+	return window;
+}
+
+
+static void i3200_get_drbs(void __iomem *window,
+	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
+{
+	int i;
+
+	for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
+		drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
+		drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;
+	}
+}
+
+static bool i3200_is_stacked(struct pci_dev *pdev,
+	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
+{
+	u16 tom;
+
+	pci_read_config_word(pdev, I3200_TOM, &tom);
+	tom &= I3200_TOM_MASK;
+
+	return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom;
+}
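+
+/*
+ * "Stacked" here means channel 1's ranks are mapped above channel 0's
+ * in the address space, so channel 1's last rank boundary coincides
+ * with the total populated memory (TOM).  E.g. with 1 GiB per channel
+ * (16 units of 64 MiB each), tom == 0x20 and drbs[1][3] == 0x20.
+ */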
+
+static unsigned long drb_to_nr_pages(
+	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked,
+	int channel, int rank)
+{
+	int n;
+
+	n = drbs[channel][rank];
+	if (rank > 0)
+		n -= drbs[channel][rank - 1];
+	if (stacked && (channel == 1) &&
+	drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1])
+		n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1];
+
+	n <<= (I3200_DRB_SHIFT - PAGE_SHIFT);
+	return n;
+}
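+
+/*
+ * Worked example (assuming PAGE_SHIFT == 12): each DRB unit is 64 MiB,
+ * i.e. 1 << (26 - 12) = 16384 pages.  For unstacked drbs[0] =
+ * {4, 8, 8, 8}, rank 0 spans 4 units (256 MiB, 65536 pages), rank 1
+ * spans 8 - 4 = 4 more, and ranks 2 and 3 are empty (0 pages).
+ */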
+
+static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	int rc;
+	int i;
+	struct mem_ctl_info *mci = NULL;
+	unsigned long last_page;
+	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
+	bool stacked;
+	void __iomem *window;
+	struct i3200_priv *priv;
+
+	debugf0("MC: %s()\n", __func__);
+
+	window = i3200_map_mchbar(pdev);
+	if (!window)
+		return -ENODEV;
+
+	i3200_get_drbs(window, drbs);
+	nr_channels = how_many_channels(pdev);
+
+	mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS,
+		nr_channels, 0);
+	if (!mci)
+		return -ENOMEM;
+
+	debugf3("MC: %s(): init mci\n", __func__);
+
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR2;
+
+	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_SECDED;
+
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = I3200_REVISION;
+	mci->ctl_name = i3200_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = i3200_check;
+	mci->ctl_page_to_phys = NULL;
+	priv = mci->pvt_info;
+	priv->window = window;
+
+	stacked = i3200_is_stacked(pdev, drbs);
+
+	/*
+	 * The dram rank boundary (DRB) reg values are boundary addresses
+	 * for each DRAM rank with a granularity of 64MB.  DRB regs are
+	 * cumulative; the last one will contain the total memory
+	 * contained in all ranks.
+	 */
+	last_page = -1UL;
+	for (i = 0; i < mci->nr_csrows; i++) {
+		unsigned long nr_pages;
+		struct csrow_info *csrow = &mci->csrows[i];
+
+		nr_pages = drb_to_nr_pages(drbs, stacked,
+			i / I3200_RANKS_PER_CHANNEL,
+			i % I3200_RANKS_PER_CHANNEL);
+
+		if (nr_pages == 0) {
+			csrow->mtype = MEM_EMPTY;
+			continue;
+		}
+
+		csrow->first_page = last_page + 1;
+		last_page += nr_pages;
+		csrow->last_page = last_page;
+		csrow->nr_pages = nr_pages;
+
+		csrow->grain = nr_pages << PAGE_SHIFT;
+		csrow->mtype = MEM_DDR2;
+		csrow->dtype = DEV_UNKNOWN;
+		csrow->edac_mode = EDAC_UNKNOWN;
+	}
+
+	i3200_clear_error_info(mci);
+
+	rc = -ENODEV;
+	if (edac_mc_add_mc(mci)) {
+		debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail;
+	}
+
+	/* get this far and it's successful */
+	debugf3("MC: %s(): success\n", __func__);
+	return 0;
+
+fail:
+	iounmap(window);
+	if (mci)
+		edac_mc_free(mci);
+
+	return rc;
+}
+
+static int __devinit i3200_init_one(struct pci_dev *pdev,
+		const struct pci_device_id *ent)
+{
+	int rc;
+
+	debugf0("MC: %s()\n", __func__);
+
+	if (pci_enable_device(pdev) < 0)
+		return -EIO;
+
+	rc = i3200_probe1(pdev, ent->driver_data);
+	if (!mci_pdev)
+		mci_pdev = pci_dev_get(pdev);
+
+	return rc;
+}
+
+static void __devexit i3200_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+	struct i3200_priv *priv;
+
+	debugf0("%s()\n", __func__);
+
+	mci = edac_mc_del_mc(&pdev->dev);
+	if (!mci)
+		return;
+
+	priv = mci->pvt_info;
+	iounmap(priv->window);
+
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
+	{
+		PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		I3200},
+	{
+		0,
+	}            /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i3200_pci_tbl);
+
+static struct pci_driver i3200_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = i3200_init_one,
+	.remove = __devexit_p(i3200_remove_one),
+	.id_table = i3200_pci_tbl,
+};
+
+static int __init i3200_init(void)
+{
+	int pci_rc;
+
+	debugf3("MC: %s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	pci_rc = pci_register_driver(&i3200_driver);
+	if (pci_rc < 0)
+		goto fail0;
+
+	if (!mci_pdev) {
+		i3200_registered = 0;
+		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_3200_HB, NULL);
+		if (!mci_pdev) {
+			debugf0("i3200 pci_get_device fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+
+		pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
+		if (pci_rc < 0) {
+			debugf0("i3200 init fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+	}
+
+	return 0;
+
+fail1:
+	pci_unregister_driver(&i3200_driver);
+
+fail0:
+	if (mci_pdev)
+		pci_dev_put(mci_pdev);
+
+	return pci_rc;
+}
+
+static void __exit i3200_exit(void)
+{
+	debugf3("MC: %s()\n", __func__);
+
+	pci_unregister_driver(&i3200_driver);
+	if (!i3200_registered) {
+		i3200_remove_one(mci_pdev);
+		pci_dev_put(mci_pdev);
+	}
+}
+
+module_init(i3200_init);
+module_exit(i3200_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akamai Technologies, Inc.");
+MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
new file mode 100644
index 0000000..adc10a2
--- /dev/null
+++ b/drivers/edac/i5000_edac.c
@@ -0,0 +1,1580 @@
+/*
+ * Intel 5000(P/V/X) class Memory Controllers kernel module
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Douglas Thompson Linux Networx (http://lnxi.com)
+ *	norsk5@xmission.com
+ *
+ * This module is based on the following document:
+ *
+ * Intel 5000X Chipset Memory Controller Hub (MCH) - Datasheet
+ * 	http://developer.intel.com/design/chipsets/datashts/313070.htm
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include <asm/mmzone.h>
+
+#include "edac_core.h"
+
+/*
+ * Alter this version for the I5000 module when modifications are made
+ */
+#define I5000_REVISION    " Ver: 2.0.12 " __DATE__
+#define EDAC_MOD_STR      "i5000_edac"
+
+#define i5000_printk(level, fmt, arg...) \
+        edac_printk(level, "i5000", fmt, ##arg)
+
+#define i5000_mc_printk(mci, level, fmt, arg...) \
+        edac_mc_chipset_printk(mci, level, "i5000", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_FBD_0
+#define PCI_DEVICE_ID_INTEL_FBD_0	0x25F5
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_FBD_1
+#define PCI_DEVICE_ID_INTEL_FBD_1	0x25F6
+#endif
+
+/* Device 16,
+ * Function 0: System Address
+ * Function 1: Memory Branch Map, Control, Errors Register
+ * Function 2: FSB Error Registers
+ *
+ * All 3 functions of Device 16 (0,1,2) share the SAME DID
+ */
+#define	PCI_DEVICE_ID_INTEL_I5000_DEV16	0x25F0
+
+/* OFFSETS for Function 0 */
+
+/* OFFSETS for Function 1 */
+#define		AMBASE			0x48
+#define		MAXCH			0x56
+#define		MAXDIMMPERCH		0x57
+#define		TOLM			0x6C
+#define		REDMEMB			0x7C
+#define			RED_ECC_LOCATOR(x)	((x) & 0x3FFFF)
+#define			REC_ECC_LOCATOR_EVEN(x)	((x) & 0x001FF)
+#define			REC_ECC_LOCATOR_ODD(x)	((x) & 0x3FE00)
+#define		MIR0			0x80
+#define		MIR1			0x84
+#define		MIR2			0x88
+#define		AMIR0			0x8C
+#define		AMIR1			0x90
+#define		AMIR2			0x94
+
+#define		FERR_FAT_FBD		0x98
+#define		NERR_FAT_FBD		0x9C
+#define			EXTRACT_FBDCHAN_INDX(x)	(((x)>>28) & 0x3)
+#define			FERR_FAT_FBDCHAN 0x30000000
+#define			FERR_FAT_M3ERR	0x00000004
+#define			FERR_FAT_M2ERR	0x00000002
+#define			FERR_FAT_M1ERR	0x00000001
+#define			FERR_FAT_MASK	(FERR_FAT_M1ERR | \
+						FERR_FAT_M2ERR | \
+						FERR_FAT_M3ERR)
+
+#define		FERR_NF_FBD		0xA0
+
+/* Thermal and SPD or FBD errors */
+#define			FERR_NF_M28ERR	0x01000000
+#define			FERR_NF_M27ERR	0x00800000
+#define			FERR_NF_M26ERR	0x00400000
+#define			FERR_NF_M25ERR	0x00200000
+#define			FERR_NF_M24ERR	0x00100000
+#define			FERR_NF_M23ERR	0x00080000
+#define			FERR_NF_M22ERR	0x00040000
+#define			FERR_NF_M21ERR	0x00020000
+
+/* Correctable errors */
+#define			FERR_NF_M20ERR	0x00010000
+#define			FERR_NF_M19ERR	0x00008000
+#define			FERR_NF_M18ERR	0x00004000
+#define			FERR_NF_M17ERR	0x00002000
+
+/* Non-Retry or redundant Retry errors */
+#define			FERR_NF_M16ERR	0x00001000
+#define			FERR_NF_M15ERR	0x00000800
+#define			FERR_NF_M14ERR	0x00000400
+#define			FERR_NF_M13ERR	0x00000200
+
+/* Uncorrectable errors */
+#define			FERR_NF_M12ERR	0x00000100
+#define			FERR_NF_M11ERR	0x00000080
+#define			FERR_NF_M10ERR	0x00000040
+#define			FERR_NF_M9ERR	0x00000020
+#define			FERR_NF_M8ERR	0x00000010
+#define			FERR_NF_M7ERR	0x00000008
+#define			FERR_NF_M6ERR	0x00000004
+#define			FERR_NF_M5ERR	0x00000002
+#define			FERR_NF_M4ERR	0x00000001
+
+#define			FERR_NF_UNCORRECTABLE	(FERR_NF_M12ERR | \
+							FERR_NF_M11ERR | \
+							FERR_NF_M10ERR | \
+							FERR_NF_M9ERR | \
+							FERR_NF_M8ERR | \
+							FERR_NF_M7ERR | \
+							FERR_NF_M6ERR | \
+							FERR_NF_M5ERR | \
+							FERR_NF_M4ERR)
+#define			FERR_NF_CORRECTABLE	(FERR_NF_M20ERR | \
+							FERR_NF_M19ERR | \
+							FERR_NF_M18ERR | \
+							FERR_NF_M17ERR)
+#define			FERR_NF_DIMM_SPARE	(FERR_NF_M27ERR | \
+							FERR_NF_M28ERR)
+#define			FERR_NF_THERMAL		(FERR_NF_M26ERR | \
+							FERR_NF_M25ERR | \
+							FERR_NF_M24ERR | \
+							FERR_NF_M23ERR)
+#define			FERR_NF_SPD_PROTOCOL	(FERR_NF_M22ERR)
+#define			FERR_NF_NORTH_CRC	(FERR_NF_M21ERR)
+#define			FERR_NF_NON_RETRY	(FERR_NF_M13ERR | \
+							FERR_NF_M14ERR | \
+							FERR_NF_M15ERR)
+
+#define		NERR_NF_FBD		0xA4
+#define			FERR_NF_MASK		(FERR_NF_UNCORRECTABLE | \
+							FERR_NF_CORRECTABLE | \
+							FERR_NF_DIMM_SPARE | \
+							FERR_NF_THERMAL | \
+							FERR_NF_SPD_PROTOCOL | \
+							FERR_NF_NORTH_CRC | \
+							FERR_NF_NON_RETRY)
+
+#define		EMASK_FBD		0xA8
+#define			EMASK_FBD_M28ERR	0x08000000
+#define			EMASK_FBD_M27ERR	0x04000000
+#define			EMASK_FBD_M26ERR	0x02000000
+#define			EMASK_FBD_M25ERR	0x01000000
+#define			EMASK_FBD_M24ERR	0x00800000
+#define			EMASK_FBD_M23ERR	0x00400000
+#define			EMASK_FBD_M22ERR	0x00200000
+#define			EMASK_FBD_M21ERR	0x00100000
+#define			EMASK_FBD_M20ERR	0x00080000
+#define			EMASK_FBD_M19ERR	0x00040000
+#define			EMASK_FBD_M18ERR	0x00020000
+#define			EMASK_FBD_M17ERR	0x00010000
+
+#define			EMASK_FBD_M15ERR	0x00004000
+#define			EMASK_FBD_M14ERR	0x00002000
+#define			EMASK_FBD_M13ERR	0x00001000
+#define			EMASK_FBD_M12ERR	0x00000800
+#define			EMASK_FBD_M11ERR	0x00000400
+#define			EMASK_FBD_M10ERR	0x00000200
+#define			EMASK_FBD_M9ERR		0x00000100
+#define			EMASK_FBD_M8ERR		0x00000080
+#define			EMASK_FBD_M7ERR		0x00000040
+#define			EMASK_FBD_M6ERR		0x00000020
+#define			EMASK_FBD_M5ERR		0x00000010
+#define			EMASK_FBD_M4ERR		0x00000008
+#define			EMASK_FBD_M3ERR		0x00000004
+#define			EMASK_FBD_M2ERR		0x00000002
+#define			EMASK_FBD_M1ERR		0x00000001
+
+#define			ENABLE_EMASK_FBD_FATAL_ERRORS	(EMASK_FBD_M1ERR | \
+							EMASK_FBD_M2ERR | \
+							EMASK_FBD_M3ERR)
+
+#define 		ENABLE_EMASK_FBD_UNCORRECTABLE	(EMASK_FBD_M4ERR | \
+							EMASK_FBD_M5ERR | \
+							EMASK_FBD_M6ERR | \
+							EMASK_FBD_M7ERR | \
+							EMASK_FBD_M8ERR | \
+							EMASK_FBD_M9ERR | \
+							EMASK_FBD_M10ERR | \
+							EMASK_FBD_M11ERR | \
+							EMASK_FBD_M12ERR)
+#define 		ENABLE_EMASK_FBD_CORRECTABLE	(EMASK_FBD_M17ERR | \
+							EMASK_FBD_M18ERR | \
+							EMASK_FBD_M19ERR | \
+							EMASK_FBD_M20ERR)
+#define			ENABLE_EMASK_FBD_DIMM_SPARE	(EMASK_FBD_M27ERR | \
+							EMASK_FBD_M28ERR)
+#define			ENABLE_EMASK_FBD_THERMALS	(EMASK_FBD_M26ERR | \
+							EMASK_FBD_M25ERR | \
+							EMASK_FBD_M24ERR | \
+							EMASK_FBD_M23ERR)
+#define			ENABLE_EMASK_FBD_SPD_PROTOCOL	(EMASK_FBD_M22ERR)
+#define			ENABLE_EMASK_FBD_NORTH_CRC	(EMASK_FBD_M21ERR)
+#define			ENABLE_EMASK_FBD_NON_RETRY	(EMASK_FBD_M15ERR | \
+							EMASK_FBD_M14ERR | \
+							EMASK_FBD_M13ERR)
+
+#define		ENABLE_EMASK_ALL	(ENABLE_EMASK_FBD_NON_RETRY | \
+					ENABLE_EMASK_FBD_NORTH_CRC | \
+					ENABLE_EMASK_FBD_SPD_PROTOCOL | \
+					ENABLE_EMASK_FBD_THERMALS | \
+					ENABLE_EMASK_FBD_DIMM_SPARE | \
+					ENABLE_EMASK_FBD_FATAL_ERRORS | \
+					ENABLE_EMASK_FBD_CORRECTABLE | \
+					ENABLE_EMASK_FBD_UNCORRECTABLE)
+
+#define		ERR0_FBD		0xAC
+#define		ERR1_FBD		0xB0
+#define		ERR2_FBD		0xB4
+#define		MCERR_FBD		0xB8
+#define		NRECMEMA		0xBE
+#define			NREC_BANK(x)		(((x)>>12) & 0x7)
+#define			NREC_RDWR(x)		(((x)>>11) & 1)
+#define			NREC_RANK(x)		(((x)>>8) & 0x7)
+#define		NRECMEMB		0xC0
+#define			NREC_CAS(x)		(((x)>>16) & 0xFFFFFF)
+#define			NREC_RAS(x)		((x) & 0x7FFF)
+#define		NRECFGLOG		0xC4
+#define		NREEECFBDA		0xC8
+#define		NREEECFBDB		0xCC
+#define		NREEECFBDC		0xD0
+#define		NREEECFBDD		0xD4
+#define		NREEECFBDE		0xD8
+#define		REDMEMA			0xDC
+#define		RECMEMA			0xE2
+#define			REC_BANK(x)		(((x)>>12) & 0x7)
+#define			REC_RDWR(x)		(((x)>>11) & 1)
+#define			REC_RANK(x)		(((x)>>8) & 0x7)
+#define		RECMEMB			0xE4
+#define			REC_CAS(x)		(((x)>>16) & 0xFFFFFF)
+#define			REC_RAS(x)		((x) & 0x7FFF)
+#define		RECFGLOG		0xE8
+#define		RECFBDA			0xEC
+#define		RECFBDB			0xF0
+#define		RECFBDC			0xF4
+#define		RECFBDD			0xF8
+#define		RECFBDE			0xFC
+
+/* OFFSETS for Function 2 */
+
+/*
+ * Device 21,
+ * Function 0: Memory Map Branch 0
+ *
+ * Device 22,
+ * Function 0: Memory Map Branch 1
+ */
+#define PCI_DEVICE_ID_I5000_BRANCH_0	0x25F5
+#define PCI_DEVICE_ID_I5000_BRANCH_1	0x25F6
+
+#define AMB_PRESENT_0	0x64
+#define AMB_PRESENT_1	0x66
+#define MTR0		0x80
+#define MTR1		0x84
+#define MTR2		0x88
+#define MTR3		0x8C
+
+#define NUM_MTRS		4
+#define CHANNELS_PER_BRANCH	(2)
+
+/* Defines to extract the various fields from the
+ *	MTRx - Memory Technology Registers
+ */
+#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (0x1 << 8))
+#define MTR_DRAM_WIDTH(mtr)		((((mtr) >> 6) & 0x1) ? 8 : 4)
+#define MTR_DRAM_BANKS(mtr)		((((mtr) >> 5) & 0x1) ? 8 : 4)
+#define MTR_DRAM_BANKS_ADDR_BITS(mtr)	((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
+#define MTR_DIMM_RANK(mtr)		(((mtr) >> 4) & 0x1)
+#define MTR_DIMM_RANK_ADDR_BITS(mtr)	(MTR_DIMM_RANK(mtr) ? 2 : 1)
+#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
+#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
+#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
+#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
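+
+/*
+ * Worked example: an MTR value of 0x0116 decodes with the macros above
+ * as: DIMM present (bit 8), x4 DRAM width, 4 banks, dual-rank, 14 row
+ * address bits (16,384 rows) and 12 column address bits (4,096 cols).
+ */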
+
+#ifdef CONFIG_EDAC_DEBUG
+static char *numrow_toString[] = {
+	"8,192 - 13 rows",
+	"16,384 - 14 rows",
+	"32,768 - 15 rows",
+	"reserved"
+};
+
+static char *numcol_toString[] = {
+	"1,024 - 10 columns",
+	"2,048 - 11 columns",
+	"4,096 - 12 columns",
+	"reserved"
+};
+#endif
+
+/* enables the report of miscellaneous messages as CE errors - default off */
+static int misc_messages;
+
+/* Enumeration of supported devices */
+enum i5000_chips {
+	I5000P = 0,
+	I5000V = 1,		/* future */
+	I5000X = 2		/* future */
+};
+
+/* Device name and register DID (Device ID) */
+struct i5000_dev_info {
+	const char *ctl_name;	/* name for this device */
+	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
+};
+
+/* Table of devices attributes supported by this driver */
+static const struct i5000_dev_info i5000_devs[] = {
+	[I5000P] = {
+		.ctl_name = "I5000",
+		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I5000_DEV16,
+	},
+};
+
+struct i5000_dimm_info {
+	int megabytes;		/* size, 0 means not present  */
+	int dual_rank;
+};
+
+#define	MAX_CHANNELS	6	/* max possible channels */
+#define MAX_CSROWS	(8*2)	/* max possible csrows per channel */
+
+/* driver private data structure */
+struct i5000_pvt {
+	struct pci_dev *system_address;	/* 16.0 */
+	struct pci_dev *branchmap_werrors;	/* 16.1 */
+	struct pci_dev *fsb_error_regs;	/* 16.2 */
+	struct pci_dev *branch_0;	/* 21.0 */
+	struct pci_dev *branch_1;	/* 22.0 */
+
+	u16 tolm;		/* top of low memory */
+	u64 ambase;		/* AMB BAR */
+
+	u16 mir0, mir1, mir2;
+
+	u16 b0_mtr[NUM_MTRS];	/* Memory Technology Reg */
+	u16 b0_ambpresent0;	/* Branch 0, Channel 0 */
+	u16 b0_ambpresent1;	/* Branch 0, Channel 1 */
+
+	u16 b1_mtr[NUM_MTRS];	/* Memory Technology Reg */
+	u16 b1_ambpresent0;	/* Branch 1, Channel 0 */
+	u16 b1_ambpresent1;	/* Branch 1, Channel 1 */
+
+	/* DIMM information matrix, allocating architecture maximums */
+	struct i5000_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
+
+	/* Actual values for this controller */
+	int maxch;		/* Max channels */
+	int maxdimmperch;	/* Max DIMMs per channel */
+};
+
+/* I5000 MCH error information retrieved from Hardware */
+struct i5000_error_info {
+
+	/* These registers are always read from the MC */
+	u32 ferr_fat_fbd;	/* First Errors Fatal */
+	u32 nerr_fat_fbd;	/* Next Errors Fatal */
+	u32 ferr_nf_fbd;	/* First Errors Non-Fatal */
+	u32 nerr_nf_fbd;	/* Next Errors Non-Fatal */
+
+	/* These registers are input ONLY if there was a Recoverable  Error */
+	u32 redmemb;		/* Recoverable Mem Data Error log B */
+	u16 recmema;		/* Recoverable Mem Error log A */
+	u32 recmemb;		/* Recoverable Mem Error log B */
+
+	/* These registers are input ONLY if there was a
+	 * Non-Recoverable Error */
+	u16 nrecmema;		/* Non-Recoverable Mem log A */
+	u16 nrecmemb;		/* Non-Recoverable Mem log B */
+
+};
+
+static struct edac_pci_ctl_info *i5000_pci;
+
+/*
+ *	i5000_get_error_info	Retrieve the hardware error information from
+ *				the hardware and cache it in the 'info'
+ *				structure
+ */
+static void i5000_get_error_info(struct mem_ctl_info *mci,
+				 struct i5000_error_info *info)
+{
+	struct i5000_pvt *pvt;
+	u32 value;
+
+	pvt = mci->pvt_info;
+
+	/* read in the 1st FATAL error register */
+	pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
+
+	/* Mask only the bits that the doc says are valid */
+	value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
+
+	/* If there is an error, then read in the */
+	/* NEXT FATAL error register and the Memory Error Log Register A */
+	if (value & FERR_FAT_MASK) {
+		info->ferr_fat_fbd = value;
+
+		/* harvest the various error data we need */
+		pci_read_config_dword(pvt->branchmap_werrors,
+				NERR_FAT_FBD, &info->nerr_fat_fbd);
+		pci_read_config_word(pvt->branchmap_werrors,
+				NRECMEMA, &info->nrecmema);
+		pci_read_config_word(pvt->branchmap_werrors,
+				NRECMEMB, &info->nrecmemb);
+
+		/* Clear the error bits, by writing them back */
+		pci_write_config_dword(pvt->branchmap_werrors,
+				FERR_FAT_FBD, value);
+	} else {
+		info->ferr_fat_fbd = 0;
+		info->nerr_fat_fbd = 0;
+		info->nrecmema = 0;
+		info->nrecmemb = 0;
+	}
+
+	/* read in the 1st NON-FATAL error register */
+	pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
+
+	/* If there is an error, then read in the NEXT NON-FATAL error
+	 * register and the recoverable error log registers as well */
+	if (value & FERR_NF_MASK) {
+		info->ferr_nf_fbd = value;
+
+		/* harvest the various error data we need */
+		pci_read_config_dword(pvt->branchmap_werrors,
+				NERR_NF_FBD, &info->nerr_nf_fbd);
+		pci_read_config_word(pvt->branchmap_werrors,
+				RECMEMA, &info->recmema);
+		pci_read_config_dword(pvt->branchmap_werrors,
+				RECMEMB, &info->recmemb);
+		pci_read_config_dword(pvt->branchmap_werrors,
+				REDMEMB, &info->redmemb);
+
+		/* Clear the error bits, by writing them back */
+		pci_write_config_dword(pvt->branchmap_werrors,
+				FERR_NF_FBD, value);
+	} else {
+		info->ferr_nf_fbd = 0;
+		info->nerr_nf_fbd = 0;
+		info->recmema = 0;
+		info->recmemb = 0;
+		info->redmemb = 0;
+	}
+}
+
+/*
+ * i5000_process_fatal_error_info(struct mem_ctl_info *mci,
+ * 					struct i5000_error_info *info,
+ * 					int handle_errors);
+ *
+ *	handle the Intel FATAL errors, if any
+ */
+static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
+					struct i5000_error_info *info,
+					int handle_errors)
+{
+	char msg[EDAC_MC_LABEL_LEN + 1 + 160];
+	char *specific = NULL;
+	u32 allErrors;
+	int branch;
+	int channel;
+	int bank;
+	int rank;
+	int rdwr;
+	int ras, cas;
+
+	/* mask off the Error bits that are possible */
+	allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
+	if (!allErrors)
+		return;		/* if no error, return now */
+
+	branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
+	channel = branch;
+
+	/* Use the NON-Recoverable macros to extract data */
+	bank = NREC_BANK(info->nrecmema);
+	rank = NREC_RANK(info->nrecmema);
+	rdwr = NREC_RDWR(info->nrecmema);
+	ras = NREC_RAS(info->nrecmemb);
+	cas = NREC_CAS(info->nrecmemb);
+
+	debugf0("\t\tCSROW= %d  Channels= %d,%d  (Branch= %d "
+		"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+		rank, channel, channel + 1, branch >> 1, bank,
+		rdwr ? "Write" : "Read", ras, cas);
+
+	/* Only 1 bit will be on */
+	switch (allErrors) {
+	case FERR_FAT_M1ERR:
+		specific = "Alert on non-redundant retry or fast "
+				"reset timeout";
+		break;
+	case FERR_FAT_M2ERR:
+		specific = "Northbound CRC error on non-redundant "
+				"retry";
+		break;
+	case FERR_FAT_M3ERR:
+		{
+		static int done;
+
+		/*
+		 * This error is generated to inform that the intelligent
+		 * throttling is disabled and the temperature passed the
+		 * specified middle point. Since this is something the BIOS
+		 * should take care of, we'll warn only once to avoid
+		 * worthlessly flooding the log.
+		 */
+		if (done)
+			return;
+		done++;
+
+		specific = ">Tmid Thermal event with intelligent "
+			   "throttling disabled";
+		}
+		break;
+	}
+
+	/* Form out message */
+	snprintf(msg, sizeof(msg),
+		 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
+		 "FATAL Err=0x%x (%s))",
+		 branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
+		 allErrors, specific);
+
+	/* Call the helper to output message */
+	edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+}
+
+/*
+ * i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
+ * 				struct i5000_error_info *info,
+ * 				int handle_errors);
+ *
+ *	handle the Intel NON-FATAL errors, if any
+ */
+static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
+					struct i5000_error_info *info,
+					int handle_errors)
+{
+	char msg[EDAC_MC_LABEL_LEN + 1 + 170];
+	char *specific = NULL;
+	u32 allErrors;
+	u32 ue_errors;
+	u32 ce_errors;
+	u32 misc_errors;
+	int branch;
+	int channel;
+	int bank;
+	int rank;
+	int rdwr;
+	int ras, cas;
+
+	/* mask off the Error bits that are possible */
+	allErrors = (info->ferr_nf_fbd & FERR_NF_MASK);
+	if (!allErrors)
+		return;		/* if no error, return now */
+
+	/* ONLY ONE of the possible error bits will be set, as per the docs */
+	ue_errors = allErrors & FERR_NF_UNCORRECTABLE;
+	if (ue_errors) {
+		debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
+
+		branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
+
+		/*
+		 * According to the i5000 datasheet, bit 28 has no significance
+		 * for errors M4Err-M12Err and M17Err-M21Err, on FERR_NF_FBD
+		 */
+		channel = branch & 2;
+
+		bank = NREC_BANK(info->nrecmema);
+		rank = NREC_RANK(info->nrecmema);
+		rdwr = NREC_RDWR(info->nrecmema);
+		ras = NREC_RAS(info->nrecmemb);
+		cas = NREC_CAS(info->nrecmemb);
+
+		debugf0("\t\tCSROW= %d  Channels= %d,%d  (Branch= %d "
+			"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+			rank, channel, channel + 1, branch >> 1, bank,
+			rdwr ? "Write" : "Read", ras, cas);
+
+		switch (ue_errors) {
+		case FERR_NF_M12ERR:
+			specific = "Non-Aliased Uncorrectable Patrol Data ECC";
+			break;
+		case FERR_NF_M11ERR:
+			specific = "Non-Aliased Uncorrectable Spare-Copy "
+					"Data ECC";
+			break;
+		case FERR_NF_M10ERR:
+			specific = "Non-Aliased Uncorrectable Mirrored Demand "
+					"Data ECC";
+			break;
+		case FERR_NF_M9ERR:
+			specific = "Non-Aliased Uncorrectable Non-Mirrored "
+					"Demand Data ECC";
+			break;
+		case FERR_NF_M8ERR:
+			specific = "Aliased Uncorrectable Patrol Data ECC";
+			break;
+		case FERR_NF_M7ERR:
+			specific = "Aliased Uncorrectable Spare-Copy Data ECC";
+			break;
+		case FERR_NF_M6ERR:
+			specific = "Aliased Uncorrectable Mirrored Demand "
+					"Data ECC";
+			break;
+		case FERR_NF_M5ERR:
+			specific = "Aliased Uncorrectable Non-Mirrored Demand "
+					"Data ECC";
+			break;
+		case FERR_NF_M4ERR:
+			specific = "Uncorrectable Data ECC on Replay";
+			break;
+		}
+
+		/* Form out message */
+		snprintf(msg, sizeof(msg),
+			 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+			 "CAS=%d, UE Err=0x%x (%s))",
+			 branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
+			 ue_errors, specific);
+
+		/* Call the helper to output message */
+		edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+	}
+
+	/* Check correctable errors */
+	ce_errors = allErrors & FERR_NF_CORRECTABLE;
+	if (ce_errors) {
+		debugf0("\tCorrected bits= 0x%x\n", ce_errors);
+
+		branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
+
+		channel = 0;
+		if (REC_ECC_LOCATOR_ODD(info->redmemb))
+			channel = 1;
+
+		/* Convert the channel to a zero-based global index by
+		 * adding the branch base */
+		channel += branch;
+
+		bank = REC_BANK(info->recmema);
+		rank = REC_RANK(info->recmema);
+		rdwr = REC_RDWR(info->recmema);
+		ras = REC_RAS(info->recmemb);
+		cas = REC_CAS(info->recmemb);
+
+		debugf0("\t\tCSROW= %d Channel= %d  (Branch %d "
+			"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+			rank, channel, branch >> 1, bank,
+			rdwr ? "Write" : "Read", ras, cas);
+
+		switch (ce_errors) {
+		case FERR_NF_M17ERR:
+			specific = "Correctable Non-Mirrored Demand Data ECC";
+			break;
+		case FERR_NF_M18ERR:
+			specific = "Correctable Mirrored Demand Data ECC";
+			break;
+		case FERR_NF_M19ERR:
+			specific = "Correctable Spare-Copy Data ECC";
+			break;
+		case FERR_NF_M20ERR:
+			specific = "Correctable Patrol Data ECC";
+			break;
+		}
+
+		/* Form out message */
+		snprintf(msg, sizeof(msg),
+			 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+			 "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
+			 rdwr ? "Write" : "Read", ras, cas, ce_errors,
+			 specific);
+
+		/* Call the helper to output message */
+		edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+	}
+
+	if (!misc_messages)
+		return;
+
+	misc_errors = allErrors & (FERR_NF_NON_RETRY | FERR_NF_NORTH_CRC |
+				   FERR_NF_SPD_PROTOCOL | FERR_NF_DIMM_SPARE);
+	if (misc_errors) {
+		switch (misc_errors) {
+		case FERR_NF_M13ERR:
+			specific = "Non-Retry or Redundant Retry FBD Memory "
+					"Alert or Redundant Fast Reset Timeout";
+			break;
+		case FERR_NF_M14ERR:
+			specific = "Non-Retry or Redundant Retry FBD "
+					"Configuration Alert";
+			break;
+		case FERR_NF_M15ERR:
+			specific = "Non-Retry or Redundant Retry FBD "
+					"Northbound CRC error on read data";
+			break;
+		case FERR_NF_M21ERR:
+			specific = "FBD Northbound CRC error on "
+					"FBD Sync Status";
+			break;
+		case FERR_NF_M22ERR:
+			specific = "SPD protocol error";
+			break;
+		case FERR_NF_M27ERR:
+			specific = "DIMM-spare copy started";
+			break;
+		case FERR_NF_M28ERR:
+			specific = "DIMM-spare copy completed";
+			break;
+		}
+		branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
+
+		/* Form out message */
+		snprintf(msg, sizeof(msg),
+			 "(Branch=%d Err=%#x (%s))", branch >> 1,
+			 misc_errors, specific);
+
+		/* Call the helper to output message */
+		edac_mc_handle_fbd_ce(mci, 0, 0, msg);
+	}
+}
+
+/*
+ *	i5000_process_error_info	Process the error info that is
+ *	in the 'info' structure, previously retrieved from hardware
+ */
+static void i5000_process_error_info(struct mem_ctl_info *mci,
+				struct i5000_error_info *info,
+				int handle_errors)
+{
+	/* First handle any fatal errors that occurred */
+	i5000_process_fatal_error_info(mci, info, handle_errors);
+
+	/* now handle any non-fatal errors that occurred */
+	i5000_process_nonfatal_error_info(mci, info, handle_errors);
+}
+
+/*
+ *	i5000_clear_error	Retrieve any error from the hardware
+ *				but do NOT process that error.
+ *				Used for 'clearing' out of previous errors
+ *				Called by the Core module.
+ */
+static void i5000_clear_error(struct mem_ctl_info *mci)
+{
+	struct i5000_error_info info;
+
+	i5000_get_error_info(mci, &info);
+}
+
+/*
+ *	i5000_check_error	Retrieve and process errors reported by the
+ *				hardware. Called by the Core module.
+ */
+static void i5000_check_error(struct mem_ctl_info *mci)
+{
+	struct i5000_error_info info;
+	debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+	i5000_get_error_info(mci, &info);
+	i5000_process_error_info(mci, &info, 1);
+}
+
+/*
+ *	i5000_get_devices	Find and perform 'get' operation on the MCH's
+ *			device/functions we want to reference for this driver
+ *
+ *			Need to 'get' device 16 func 1 and func 2
+ */
+static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx)
+{
+	//const struct i5000_dev_info *i5000_dev = &i5000_devs[dev_idx];
+	struct i5000_pvt *pvt;
+	struct pci_dev *pdev;
+
+	pvt = mci->pvt_info;
+
+	/* Attempt to 'get' the MCH register we want */
+	pdev = NULL;
+	while (1) {
+		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
+
+		/* End of list, leave */
+		if (pdev == NULL) {
+			i5000_printk(KERN_ERR,
+				"'system address, Processor Bus' "
+				"device not found: "
+				"vendor 0x%x device 0x%x FUNC 1 "
+				"(broken BIOS?)\n",
+				PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_I5000_DEV16);
+
+			return 1;
+		}
+
+		/* Scan for device 16 func 1 */
+		if (PCI_FUNC(pdev->devfn) == 1)
+			break;
+	}
+
+	pvt->branchmap_werrors = pdev;
+
+	/* Attempt to 'get' the MCH register we want */
+	pdev = NULL;
+	while (1) {
+		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
+
+		if (pdev == NULL) {
+			i5000_printk(KERN_ERR,
+				"MC: 'branchmap,control,errors' "
+				"device not found: "
+				"vendor 0x%x device 0x%x Func 2 "
+				"(broken BIOS?)\n",
+				PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_I5000_DEV16);
+
+			pci_dev_put(pvt->branchmap_werrors);
+			return 1;
+		}
+
+		/* Scan for device 16 func 2 */
+		if (PCI_FUNC(pdev->devfn) == 2)
+			break;
+	}
+
+	pvt->fsb_error_regs = pdev;
+
+	debugf1("System Address, processor bus- PCI Bus ID: %s  %x:%x\n",
+		pci_name(pvt->system_address),
+		pvt->system_address->vendor, pvt->system_address->device);
+	debugf1("Branchmap, control and errors - PCI Bus ID: %s  %x:%x\n",
+		pci_name(pvt->branchmap_werrors),
+		pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
+	debugf1("FSB Error Regs - PCI Bus ID: %s  %x:%x\n",
+		pci_name(pvt->fsb_error_regs),
+		pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
+
+	pdev = NULL;
+	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+			PCI_DEVICE_ID_I5000_BRANCH_0, pdev);
+
+	if (pdev == NULL) {
+		i5000_printk(KERN_ERR,
+			"MC: 'BRANCH 0' device not found: "
+			"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
+			PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_I5000_BRANCH_0);
+
+		pci_dev_put(pvt->branchmap_werrors);
+		pci_dev_put(pvt->fsb_error_regs);
+		return 1;
+	}
+
+	pvt->branch_0 = pdev;
+
+	/* If this device claims to have more than 2 channels then
+	 * fetch Branch 1's information
+	 */
+	if (pvt->maxch >= CHANNELS_PER_BRANCH) {
+		pdev = NULL;
+		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_I5000_BRANCH_1, pdev);
+
+		if (pdev == NULL) {
+			i5000_printk(KERN_ERR,
+				"MC: 'BRANCH 1' device not found: "
+				"vendor 0x%x device 0x%x Func 0 "
+				"(broken BIOS?)\n",
+				PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_I5000_BRANCH_1);
+
+			pci_dev_put(pvt->branchmap_werrors);
+			pci_dev_put(pvt->fsb_error_regs);
+			pci_dev_put(pvt->branch_0);
+			return 1;
+		}
+
+		pvt->branch_1 = pdev;
+	}
+
+	return 0;
+}
+
+/*
+ *	i5000_put_devices	'put' all the devices that we have
+ *				reserved via 'get'
+ */
+static void i5000_put_devices(struct mem_ctl_info *mci)
+{
+	struct i5000_pvt *pvt;
+
+	pvt = mci->pvt_info;
+
+	pci_dev_put(pvt->branchmap_werrors);	/* FUNC 1 */
+	pci_dev_put(pvt->fsb_error_regs);	/* FUNC 2 */
+	pci_dev_put(pvt->branch_0);	/* DEV 21 */
+
+	/* Only if more than 2 channels do we release the second branch */
+	if (pvt->maxch >= CHANNELS_PER_BRANCH)
+		pci_dev_put(pvt->branch_1);	/* DEV 22 */
+}
+
+/*
+ *	determine_amb_present_reg
+ *
+ *		the AMB-present information is contained in NUM_MTRS different
+ *		registers; determining which of them to read requires knowing
+ *		which channel is in question
+ *
+ *	2 branches, each with 2 channels
+ *		b0_ambpresent0 for channel '0'
+ *		b0_ambpresent1 for channel '1'
+ *		b1_ambpresent0 for channel '2'
+ *		b1_ambpresent1 for channel '3'
+ */
+static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
+{
+	int amb_present;
+
+	if (channel < CHANNELS_PER_BRANCH) {
+		if (channel & 0x1)
+			amb_present = pvt->b0_ambpresent1;
+		else
+			amb_present = pvt->b0_ambpresent0;
+	} else {
+		if (channel & 0x1)
+			amb_present = pvt->b1_ambpresent1;
+		else
+			amb_present = pvt->b1_ambpresent0;
+	}
+
+	return amb_present;
+}
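+
+/*
+ * For example, following the mapping above: channel 3 (branch 1, odd
+ * channel) selects pvt->b1_ambpresent1, while channel 0 selects
+ * pvt->b0_ambpresent0.
+ */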
+
+/*
+ * determine_mtr(pvt, csrow, channel)
+ *
+ *	return the proper MTR register as determined by the csrow and channel desired
+ */
+static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
+{
+	int mtr;
+
+	if (channel < CHANNELS_PER_BRANCH)
+		mtr = pvt->b0_mtr[csrow >> 1];
+	else
+		mtr = pvt->b1_mtr[csrow >> 1];
+
+	return mtr;
+}
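+
+/*
+ * For example, csrow 5 on channel 2 lives on branch 1 and uses MTR
+ * index 2 (csrow >> 1), i.e. pvt->b1_mtr[2].
+ */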
+
+/*
+ *	decode_mtr		dump the DIMM geometry encoded in one
+ *				MTR register (debug output only)
+ */
+static void decode_mtr(int slot_row, u16 mtr)
+{
+	int ans;
+
+	ans = MTR_DIMMS_PRESENT(mtr);
+
+	debugf2("\tMTR%d=0x%x:  DIMMs are %s\n", slot_row, mtr,
+		ans ? "Present" : "NOT Present");
+	if (!ans)
+		return;
+
+	debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
+	debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
+	debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
+	debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
+	debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
+}
+
+static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
+			struct i5000_dimm_info *dinfo)
+{
+	int mtr;
+	int amb_present_reg;
+	int addrBits;
+
+	mtr = determine_mtr(pvt, csrow, channel);
+	if (MTR_DIMMS_PRESENT(mtr)) {
+		amb_present_reg = determine_amb_present_reg(pvt, channel);
+
+		/* Determine if there is a DIMM present in this DIMM slot */
+		if (amb_present_reg & (1 << (csrow >> 1))) {
+			dinfo->dual_rank = MTR_DIMM_RANK(mtr);
+
+			if (!((dinfo->dual_rank == 0) &&
+				((csrow & 0x1) == 0x1))) {
+				/* Start with the number of bits for a Bank
+				 * on the DRAM */
+				addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+				/* Add the number of ROW bits */
+				addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+				/* add the number of COLUMN bits */
+				addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+
+				addrBits += 6;	/* 64 (2^6) bits per address */
+				addrBits -= 20;	/* divide by 2^20 for megabytes */
+				addrBits -= 3;	/* 8 bits per byte */
+
+				dinfo->megabytes = 1 << addrBits;
+			}
+		}
+	}
+}
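+
+/*
+ * A worked example of the size arithmetic above, for a hypothetical DIMM
+ * with 4 banks (2 bits), 14 row bits and 11 column bits: 2 + 14 + 11 = 27
+ * address bits, each location 64 bits wide, so 27 + 6 - 20 - 3 = 10 and
+ * dinfo->megabytes = 1 << 10 = 1024 MB.
+ */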
+
+/*
+ *	calculate_dimm_size
+ *
+ *	also will output a DIMM matrix map, if debug is enabled, for viewing
+ *	how the DIMMs are populated
+ */
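+/*
+ * Roughly, with two channels the matrix looks like this (sizes are
+ * hypothetical):
+ *
+ *	-----------------------------------------------------------
+ *	csrow  1	1024 MB   | 1024 MB   |
+ *	csrow  0	1024 MB   | 1024 MB   |
+ *	-----------------------------------------------------------
+ *	            channel 0 | channel 1 |
+ */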
+static void calculate_dimm_size(struct i5000_pvt *pvt)
+{
+	struct i5000_dimm_info *dinfo;
+	int csrow, max_csrows;
+	char *p, *mem_buffer;
+	int space, n;
+	int channel;
+
+	/* ================= Generate some debug output ================= */
+	space = PAGE_SIZE;
+	mem_buffer = p = kmalloc(space, GFP_KERNEL);
+	if (p == NULL) {
+		i5000_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
+			__FILE__, __func__);
+		return;
+	}
+
+	n = snprintf(p, space, "\n");
+	p += n;
+	space -= n;
+
+	/* Scan all the actual CSROWS (which is # of DIMMS * 2)
+	 * and calculate the information for each DIMM
+	 * Start with the highest csrow first, to display it first
+	 * and work toward the 0th csrow
+	 */
+	max_csrows = pvt->maxdimmperch * 2;
+	for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+
+		/* on an odd csrow, first output a 'boundary' marker,
+		 * then reset the message buffer  */
+		if (csrow & 0x1) {
+			n = snprintf(p, space, "---------------------------"
+				"--------------------------------");
+			p += n;
+			space -= n;
+			debugf2("%s\n", mem_buffer);
+			p = mem_buffer;
+			space = PAGE_SIZE;
+		}
+		n = snprintf(p, space, "csrow %2d    ", csrow);
+		p += n;
+		space -= n;
+
+		for (channel = 0; channel < pvt->maxch; channel++) {
+			dinfo = &pvt->dimm_info[csrow][channel];
+			handle_channel(pvt, csrow, channel, dinfo);
+			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
+			p += n;
+			space -= n;
+		}
+		n = snprintf(p, space, "\n");
+		p += n;
+		space -= n;
+	}
+
+	/* Output the last bottom 'boundary' marker */
+	n = snprintf(p, space, "---------------------------"
+		"--------------------------------\n");
+	p += n;
+	space -= n;
+
+	/* now output the 'channel' labels */
+	n = snprintf(p, space, "            ");
+	p += n;
+	space -= n;
+	for (channel = 0; channel < pvt->maxch; channel++) {
+		n = snprintf(p, space, "channel %d | ", channel);
+		p += n;
+		space -= n;
+	}
+	n = snprintf(p, space, "\n");
+	p += n;
+	space -= n;
+
+	/* output the last message and free buffer */
+	debugf2("%s\n", mem_buffer);
+	kfree(mem_buffer);
+}
+
+/*
+ *	i5000_get_mc_regs	read in the necessary registers and
+ *				cache locally
+ *
+ *			Fills in the private data members
+ */
+static void i5000_get_mc_regs(struct mem_ctl_info *mci)
+{
+	struct i5000_pvt *pvt;
+	u32 actual_tolm;
+	u16 limit;
+	int slot_row;
+	int maxch;
+	int maxdimmperch;
+	int way0, way1;
+
+	pvt = mci->pvt_info;
+
+	pci_read_config_dword(pvt->system_address, AMBASE,
+			(u32 *)&pvt->ambase);
+	/* upper dword: advance by one u32, not by sizeof(u32) u32s */
+	pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
+			((u32 *)&pvt->ambase) + 1);
+
+	maxdimmperch = pvt->maxdimmperch;
+	maxch = pvt->maxch;
+
+	debugf2("AMBASE= 0x%lx  MAXCH= %d  MAX-DIMM-Per-CH= %d\n",
+		(long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
+
+	/* Get the Branch Map regs */
+	pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
+	pvt->tolm >>= 12;
+	debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
+		pvt->tolm);
+
+	actual_tolm = pvt->tolm << 28;
+	debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm);
+
+	pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
+	pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
+	pci_read_config_word(pvt->branchmap_werrors, MIR2, &pvt->mir2);
+
+	/* Decode the MIR[0-2] regs */
+	limit = (pvt->mir0 >> 4) & 0x0FFF;
+	way0 = pvt->mir0 & 0x1;
+	way1 = pvt->mir0 & 0x2;
+	debugf2("MIR0: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);
+	limit = (pvt->mir1 >> 4) & 0x0FFF;
+	way0 = pvt->mir1 & 0x1;
+	way1 = pvt->mir1 & 0x2;
+	debugf2("MIR1: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);
+	limit = (pvt->mir2 >> 4) & 0x0FFF;
+	way0 = pvt->mir2 & 0x1;
+	way1 = pvt->mir2 & 0x2;
+	debugf2("MIR2: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);
+
+	/* Get the MTR[0-3] regs */
+	for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+		int where = MTR0 + (slot_row * sizeof(u32));
+
+		pci_read_config_word(pvt->branch_0, where,
+				&pvt->b0_mtr[slot_row]);
+
+		debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
+			pvt->b0_mtr[slot_row]);
+
+		if (pvt->maxch >= CHANNELS_PER_BRANCH) {
+			pci_read_config_word(pvt->branch_1, where,
+					&pvt->b1_mtr[slot_row]);
+			debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row,
+				where, pvt->b1_mtr[slot_row]);
+		} else {
+			pvt->b1_mtr[slot_row] = 0;
+		}
+	}
+
+	/* Read and dump branch 0's MTRs */
+	debugf2("\nMemory Technology Registers:\n");
+	debugf2("   Branch 0:\n");
+	for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+		decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
+	}
+	pci_read_config_word(pvt->branch_0, AMB_PRESENT_0,
+			&pvt->b0_ambpresent0);
+	debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
+	pci_read_config_word(pvt->branch_0, AMB_PRESENT_1,
+			&pvt->b0_ambpresent1);
+	debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
+
+	/* Only if we have 2 branches (4 channels) */
+	if (pvt->maxch < CHANNELS_PER_BRANCH) {
+		pvt->b1_ambpresent0 = 0;
+		pvt->b1_ambpresent1 = 0;
+	} else {
+		/* Read and dump  branch 1's MTRs */
+		debugf2("   Branch 1:\n");
+		for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+			decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
+		}
+		pci_read_config_word(pvt->branch_1, AMB_PRESENT_0,
+				&pvt->b1_ambpresent0);
+		debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
+			pvt->b1_ambpresent0);
+		pci_read_config_word(pvt->branch_1, AMB_PRESENT_1,
+				&pvt->b1_ambpresent1);
+		debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
+			pvt->b1_ambpresent1);
+	}
+
+	/* Go and determine the size of each DIMM and place in an
+	 * orderly matrix */
+	calculate_dimm_size(pvt);
+}
+
+/*
+ *	i5000_init_csrows	Initialize the 'csrows' table within
+ *				the mci control	structure with the
+ *				addressing of memory.
+ *
+ *	return:
+ *		0	success
+ *		1	no actual memory found on this MC
+ */
+static int i5000_init_csrows(struct mem_ctl_info *mci)
+{
+	struct i5000_pvt *pvt;
+	struct csrow_info *p_csrow;
+	int empty, channel_count;
+	int max_csrows;
+	int mtr, mtr1;
+	int csrow_megs;
+	int channel;
+	int csrow;
+
+	pvt = mci->pvt_info;
+
+	channel_count = pvt->maxch;
+	max_csrows = pvt->maxdimmperch * 2;
+
+	empty = 1;		/* Assume NO memory */
+
+	for (csrow = 0; csrow < max_csrows; csrow++) {
+		p_csrow = &mci->csrows[csrow];
+
+		p_csrow->csrow_idx = csrow;
+
+		/* use branch 0 for the basis */
+		mtr = pvt->b0_mtr[csrow >> 1];
+		mtr1 = pvt->b1_mtr[csrow >> 1];
+
+		/* if no DIMMS on this row, continue */
+		if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
+			continue;
+
+		/* FAKE OUT VALUES, FIXME */
+		p_csrow->first_page = 0 + csrow * 20;
+		p_csrow->last_page = 9 + csrow * 20;
+		p_csrow->page_mask = 0xFFF;
+
+		p_csrow->grain = 8;
+
+		csrow_megs = 0;
+		for (channel = 0; channel < pvt->maxch; channel++) {
+			csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
+		}
+
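+		/* megabytes -> pages, assuming 4 KiB pages: 2^20 / 2^12 = 2^8 */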
+		p_csrow->nr_pages = csrow_megs << 8;
+
+		/* Assume DDR2 for now */
+		p_csrow->mtype = MEM_FB_DDR2;
+
+		/* ask what device type on this row */
+		if (MTR_DRAM_WIDTH(mtr))
+			p_csrow->dtype = DEV_X8;
+		else
+			p_csrow->dtype = DEV_X4;
+
+		p_csrow->edac_mode = EDAC_S8ECD8ED;
+
+		empty = 0;
+	}
+
+	return empty;
+}
+
+/*
+ *	i5000_enable_error_reporting
+ *			Turn on the memory reporting features of the hardware
+ */
+static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
+{
+	struct i5000_pvt *pvt;
+	u32 fbd_error_mask;
+
+	pvt = mci->pvt_info;
+
+	/* Read the FBD Error Mask Register */
+	pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
+			&fbd_error_mask);
+
+	/* Enable with a '0' */
+	fbd_error_mask &= ~(ENABLE_EMASK_ALL);
+
+	pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
+			fbd_error_mask);
+}
+
+/*
+ * i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
+ *				     &num_channels)
+ *
+ *	ask the device how many channels are present and how many DIMMs
+ *	per channel as well
+ */
+static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
+					int *num_dimms_per_channel,
+					int *num_channels)
+{
+	u8 value;
+
+	/* Need to retrieve just how many channels and dimms per channel are
+	 * supported on this memory controller
+	 */
+	pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
+	*num_dimms_per_channel = (int)value * 2;
+
+	pci_read_config_byte(pdev, MAXCH, &value);
+	*num_channels = (int)value;
+}
+
+/*
+ *	i5000_probe1	Probe for ONE instance of device to see if it is
+ *			present.
+ *	return:
+ *		0 for FOUND a device
+ *		< 0 for error code
+ */
+static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	struct mem_ctl_info *mci;
+	struct i5000_pvt *pvt;
+	int num_channels;
+	int num_dimms_per_channel;
+	int num_csrows;
+
+	debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+		__func__,
+		pdev->bus->number,
+		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+	/* We only are looking for func 0 of the set */
+	if (PCI_FUNC(pdev->devfn) != 0)
+		return -ENODEV;
+
+	/* Ask the devices for the number of CSROWS and CHANNELS so
+	 * that we can calculate the memory resources, etc
+	 *
+	 * The Chipset will report what it can handle which will be greater
+	 * or equal to what the motherboard manufacturer will implement.
+	 *
+	 * As we don't have a motherboard identification routine to determine
+	 * the actual number of slots/dimms per channel, we use the maximum
+	 * the chipset reports. We might therefore claim more DIMMs per
+	 * channel than the motherboard actually has, but this allows the
+	 * driver to support up to the chipset maximum without any fancy
+	 * motherboard detection.
+	 */
+	i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
+					&num_channels);
+	num_csrows = num_dimms_per_channel * 2;
+
+	debugf0("MC: %s(): Number of - Channels= %d  DIMMS= %d  CSROWS= %d\n",
+		__func__, num_channels, num_dimms_per_channel, num_csrows);
+
+	/* allocate a new MC control structure */
+	mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
+
+	if (mci == NULL)
+		return -ENOMEM;
+
+	kobject_get(&mci->edac_mci_kobj);
+	debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+	mci->dev = &pdev->dev;	/* record ptr  to the generic device */
+
+	pvt = mci->pvt_info;
+	pvt->system_address = pdev;	/* Record this device in our private */
+	pvt->maxch = num_channels;
+	pvt->maxdimmperch = num_dimms_per_channel;
+
+	/* 'get' the pci devices we want to reserve for our use */
+	if (i5000_get_devices(mci, dev_idx))
+		goto fail0;
+
+	/* Time to get serious */
+	i5000_get_mc_regs(mci);	/* retrieve the hardware registers */
+
+	mci->mc_idx = 0;
+	mci->mtype_cap = MEM_FLAG_FB_DDR2;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE;
+	mci->edac_cap = EDAC_FLAG_NONE;
+	mci->mod_name = "i5000_edac.c";
+	mci->mod_ver = I5000_REVISION;
+	mci->ctl_name = i5000_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->ctl_page_to_phys = NULL;
+
+	/* Set the function pointer to an actual operation function */
+	mci->edac_check = i5000_check_error;
+
+	/* initialize the MC control structure 'csrows' table
+	 * with the mapping and control information */
+	if (i5000_init_csrows(mci)) {
+		debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
+			"    because i5000_init_csrows() returned nonzero "
+			"value\n");
+		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
+	} else {
+		debugf1("MC: Enable error reporting now\n");
+		i5000_enable_error_reporting(mci);
+	}
+
+	/* add this new MC control structure to EDAC's list of MCs */
+	if (edac_mc_add_mc(mci)) {
+		debugf0("MC: " __FILE__
+			": %s(): failed edac_mc_add_mc()\n", __func__);
+		/* FIXME: perhaps some code should go here that disables error
+		 * reporting if we just enabled it
+		 */
+		goto fail1;
+	}
+
+	i5000_clear_error(mci);
+
+	/* allocating generic PCI control info */
+	i5000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!i5000_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	return 0;
+
+	/* Error exit unwinding stack */
+fail1:
+
+	i5000_put_devices(mci);
+
+fail0:
+	kobject_put(&mci->edac_mci_kobj);
+	edac_mc_free(mci);
+	return -ENODEV;
+}
+
+/*
+ *	i5000_init_one	constructor for one instance of device
+ *
+ * 	returns:
+ *		negative on error
+ *		count (>= 0)
+ */
+static int __devinit i5000_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *id)
+{
+	int rc;
+
+	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+	/* wake up device */
+	rc = pci_enable_device(pdev);
+	if (rc == -EIO)
+		return rc;
+
+	/* now probe and enable the device */
+	return i5000_probe1(pdev, id->driver_data);
+}
+
+/*
+ *	i5000_remove_one	destructor for one instance of device
+ *
+ */
+static void __devexit i5000_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0(__FILE__ ": %s()\n", __func__);
+
+	if (i5000_pci)
+		edac_pci_release_generic_ctl(i5000_pci);
+
+	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+		return;
+
+	/* retrieve references to resources, and free those resources */
+	i5000_put_devices(mci);
+	kobject_put(&mci->edac_mci_kobj);
+	edac_mc_free(mci);
+}
+
+/*
+ *	pci_device_id	table for which devices we are looking for
+ *
+ *	The "E500P" device is the first device supported.
+ */
+static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
+	 .driver_data = I5000P},
+
+	{0,}			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i5000_pci_tbl);
+
+/*
+ *	i5000_driver	pci_driver structure for this module
+ *
+ */
+static struct pci_driver i5000_driver = {
+	.name = KBUILD_BASENAME,
+	.probe = i5000_init_one,
+	.remove = __devexit_p(i5000_remove_one),
+	.id_table = i5000_pci_tbl,
+};
+
+/*
+ *	i5000_init		Module entry function
+ *			Try to initialize this module for its devices
+ */
+static int __init i5000_init(void)
+{
+	int pci_rc;
+
+	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	pci_rc = pci_register_driver(&i5000_driver);
+
+	return (pci_rc < 0) ? pci_rc : 0;
+}
+
+/*
+ *	i5000_exit()	Module exit function
+ *			Unregister the driver
+ */
+static void __exit i5000_exit(void)
+{
+	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+	pci_unregister_driver(&i5000_driver);
+}
+
+module_init(i5000_init);
+module_exit(i5000_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR
+    ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
+MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
+		I5000_REVISION);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+module_param(misc_messages, int, 0444);
+MODULE_PARM_DESC(misc_messages, "Log miscellaneous non fatal messages");
+
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
new file mode 100644
index 0000000..22db05a
--- /dev/null
+++ b/drivers/edac/i5100_edac.c
@@ -0,0 +1,981 @@
+/*
+ * Intel 5100 Memory Controllers kernel module
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * This module is based on the following document:
+ *
+ * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
+ *      http://download.intel.com/design/chipsets/datashts/318378.pdf
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include <linux/delay.h>
+#include <linux/mmzone.h>
+
+#include "edac_core.h"
+
+/* register addresses */
+
+/* device 16, func 1 */
+#define I5100_MC		0x40	/* Memory Control Register */
+#define I5100_MS		0x44	/* Memory Status Register */
+#define I5100_SPDDATA		0x48	/* Serial Presence Detect Status Reg */
+#define I5100_SPDCMD		0x4c	/* Serial Presence Detect Command Reg */
+#define I5100_TOLM		0x6c	/* Top of Low Memory */
+#define I5100_MIR0		0x80	/* Memory Interleave Range 0 */
+#define I5100_MIR1		0x84	/* Memory Interleave Range 1 */
+#define I5100_AMIR_0		0x8c	/* Adjusted Memory Interleave Range 0 */
+#define I5100_AMIR_1		0x90	/* Adjusted Memory Interleave Range 1 */
+#define I5100_FERR_NF_MEM	0xa0	/* MC First Non Fatal Errors */
+#define		I5100_FERR_NF_MEM_M16ERR_MASK	(1 << 16)
+#define		I5100_FERR_NF_MEM_M15ERR_MASK	(1 << 15)
+#define		I5100_FERR_NF_MEM_M14ERR_MASK	(1 << 14)
+#define		I5100_FERR_NF_MEM_M12ERR_MASK	(1 << 12)
+#define		I5100_FERR_NF_MEM_M11ERR_MASK	(1 << 11)
+#define		I5100_FERR_NF_MEM_M10ERR_MASK	(1 << 10)
+#define		I5100_FERR_NF_MEM_M6ERR_MASK	(1 << 6)
+#define		I5100_FERR_NF_MEM_M5ERR_MASK	(1 << 5)
+#define		I5100_FERR_NF_MEM_M4ERR_MASK	(1 << 4)
+#define		I5100_FERR_NF_MEM_M1ERR_MASK	1
+#define		I5100_FERR_NF_MEM_ANY_MASK	\
+			(I5100_FERR_NF_MEM_M16ERR_MASK | \
+			I5100_FERR_NF_MEM_M15ERR_MASK | \
+			I5100_FERR_NF_MEM_M14ERR_MASK | \
+			I5100_FERR_NF_MEM_M12ERR_MASK | \
+			I5100_FERR_NF_MEM_M11ERR_MASK | \
+			I5100_FERR_NF_MEM_M10ERR_MASK | \
+			I5100_FERR_NF_MEM_M6ERR_MASK | \
+			I5100_FERR_NF_MEM_M5ERR_MASK | \
+			I5100_FERR_NF_MEM_M4ERR_MASK | \
+			I5100_FERR_NF_MEM_M1ERR_MASK)
+#define	I5100_NERR_NF_MEM	0xa4	/* MC Next Non-Fatal Errors */
+#define I5100_EMASK_MEM		0xa8	/* MC Error Mask Register */
+
+/* device 21 and 22, func 0 */
+#define I5100_MTR_0	0x154	/* Memory Technology Registers 0-3 */
+#define I5100_DMIR	0x15c	/* DIMM Interleave Range */
+#define	I5100_VALIDLOG	0x18c	/* Valid Log Markers */
+#define	I5100_NRECMEMA	0x190	/* Non-Recoverable Memory Error Log Reg A */
+#define	I5100_NRECMEMB	0x194	/* Non-Recoverable Memory Error Log Reg B */
+#define	I5100_REDMEMA	0x198	/* Recoverable Memory Data Error Log Reg A */
+#define	I5100_REDMEMB	0x19c	/* Recoverable Memory Data Error Log Reg B */
+#define	I5100_RECMEMA	0x1a0	/* Recoverable Memory Error Log Reg A */
+#define	I5100_RECMEMB	0x1a4	/* Recoverable Memory Error Log Reg B */
+#define I5100_MTR_4	0x1b0	/* Memory Technology Registers 4,5 */
+
+/* bit field accessors */
+
+static inline u32 i5100_mc_errdeten(u32 mc)
+{
+	return mc >> 5 & 1;
+}
+
+static inline u16 i5100_spddata_rdo(u16 a)
+{
+	return a >> 15 & 1;
+}
+
+static inline u16 i5100_spddata_sbe(u16 a)
+{
+	return a >> 13 & 1;
+}
+
+static inline u16 i5100_spddata_busy(u16 a)
+{
+	return a >> 12 & 1;
+}
+
+static inline u16 i5100_spddata_data(u16 a)
+{
+	return a & ((1 << 8) - 1);
+}
+
+static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
+				      u32 data, u32 cmd)
+{
+	return	((dti & ((1 << 4) - 1))  << 28) |
+		((ckovrd & 1)            << 27) |
+		((sa & ((1 << 3) - 1))   << 24) |
+		((ba & ((1 << 8) - 1))   << 16) |
+		((data & ((1 << 8) - 1)) <<  8) |
+		(cmd & 1);
+}
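+
+/*
+ * Field layout of the SPD command word packed above:
+ *	[31:28] dti  [27] ckovrd  [26:24] sa  [23:16] ba  [15:8] data  [0] cmd
+ * e.g. the SPD read issued later in this file:
+ *	i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr, 0, 0)
+ */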
+
+static inline u16 i5100_tolm_tolm(u16 a)
+{
+	return a >> 12 & ((1 << 4) - 1);
+}
+
+static inline u16 i5100_mir_limit(u16 a)
+{
+	return a >> 4 & ((1 << 12) - 1);
+}
+
+static inline u16 i5100_mir_way1(u16 a)
+{
+	return a >> 1 & 1;
+}
+
+static inline u16 i5100_mir_way0(u16 a)
+{
+	return a & 1;
+}
+
+static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
+{
+	return a >> 28 & 1;
+}
+
+static inline u32 i5100_ferr_nf_mem_any(u32 a)
+{
+	return a & I5100_FERR_NF_MEM_ANY_MASK;
+}
+
+static inline u32 i5100_nerr_nf_mem_any(u32 a)
+{
+	return i5100_ferr_nf_mem_any(a);
+}
+
+static inline u32 i5100_dmir_limit(u32 a)
+{
+	return a >> 16 & ((1 << 11) - 1);
+}
+
+static inline u32 i5100_dmir_rank(u32 a, u32 i)
+{
+	return a >> (4 * i) & ((1 << 2) - 1);
+}
+
+static inline u16 i5100_mtr_present(u16 a)
+{
+	return a >> 10 & 1;
+}
+
+static inline u16 i5100_mtr_ethrottle(u16 a)
+{
+	return a >> 9 & 1;
+}
+
+static inline u16 i5100_mtr_width(u16 a)
+{
+	return a >> 8 & 1;
+}
+
+static inline u16 i5100_mtr_numbank(u16 a)
+{
+	return a >> 6 & 1;
+}
+
+static inline u16 i5100_mtr_numrow(u16 a)
+{
+	return a >> 2 & ((1 << 2) - 1);
+}
+
+static inline u16 i5100_mtr_numcol(u16 a)
+{
+	return a & ((1 << 2) - 1);
+}
+
+
+static inline u32 i5100_validlog_redmemvalid(u32 a)
+{
+	return a >> 2 & 1;
+}
+
+static inline u32 i5100_validlog_recmemvalid(u32 a)
+{
+	return a >> 1 & 1;
+}
+
+static inline u32 i5100_validlog_nrecmemvalid(u32 a)
+{
+	return a & 1;
+}
+
+static inline u32 i5100_nrecmema_merr(u32 a)
+{
+	return a >> 15 & ((1 << 5) - 1);
+}
+
+static inline u32 i5100_nrecmema_bank(u32 a)
+{
+	return a >> 12 & ((1 << 3) - 1);
+}
+
+static inline u32 i5100_nrecmema_rank(u32 a)
+{
+	return a >>  8 & ((1 << 3) - 1);
+}
+
+static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
+{
+	return a & ((1 << 8) - 1);
+}
+
+static inline u32 i5100_nrecmemb_cas(u32 a)
+{
+	return a >> 16 & ((1 << 13) - 1);
+}
+
+static inline u32 i5100_nrecmemb_ras(u32 a)
+{
+	return a & ((1 << 16) - 1);
+}
+
+static inline u32 i5100_redmemb_ecc_locator(u32 a)
+{
+	return a & ((1 << 18) - 1);
+}
+
+static inline u32 i5100_recmema_merr(u32 a)
+{
+	return i5100_nrecmema_merr(a);
+}
+
+static inline u32 i5100_recmema_bank(u32 a)
+{
+	return i5100_nrecmema_bank(a);
+}
+
+static inline u32 i5100_recmema_rank(u32 a)
+{
+	return i5100_nrecmema_rank(a);
+}
+
+static inline u32 i5100_recmema_dm_buf_id(u32 a)
+{
+	return i5100_nrecmema_dm_buf_id(a);
+}
+
+static inline u32 i5100_recmemb_cas(u32 a)
+{
+	return i5100_nrecmemb_cas(a);
+}
+
+static inline u32 i5100_recmemb_ras(u32 a)
+{
+	return i5100_nrecmemb_ras(a);
+}
+
+/* some generic limits */
+#define I5100_MAX_RANKS_PER_CTLR	6
+#define I5100_MAX_CTLRS			2
+#define I5100_MAX_RANKS_PER_DIMM	4
+#define I5100_DIMM_ADDR_LINES		(6 - 3)	/* 64 bits / 8 bits per byte */
+#define I5100_MAX_DIMM_SLOTS_PER_CTLR	4
+#define I5100_MAX_RANK_INTERLEAVE	4
+#define I5100_MAX_DMIRS			5
+
+struct i5100_priv {
+	/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
+	int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];
+
+	/*
+	 * mainboard chip select map -- maps i5100 chip selects to
+	 * DIMM slot chip selects.  In the case of only 4 ranks per
+	 * controller, the mapping is fairly obvious but not unique.
+	 * we map -1 -> NC and assume both controllers use the same
+	 * map...
+	 *
+	 */
+	int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];
+
+	/* memory interleave range */
+	struct {
+		u64	 limit;
+		unsigned way[2];
+	} mir[I5100_MAX_CTLRS];
+
+	/* adjusted memory interleave range register */
+	unsigned amir[I5100_MAX_CTLRS];
+
+	/* dimm interleave range */
+	struct {
+		unsigned rank[I5100_MAX_RANK_INTERLEAVE];
+		u64	 limit;
+	} dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];
+
+	/* memory technology registers... */
+	struct {
+		unsigned present;	/* 0 or 1 */
+		unsigned ethrottle;	/* 0 or 1 */
+		unsigned width;		/* 4 or 8 bits  */
+		unsigned numbank;	/* 2 or 3 lines */
+		unsigned numrow;	/* 13 .. 16 lines */
+		unsigned numcol;	/* 11 .. 12 lines */
+	} mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];
+
+	u64 tolm;		/* top of low memory in bytes */
+	unsigned ranksperctlr;	/* number of ranks per controller */
+
+	struct pci_dev *mc;	/* device 16 func 1 */
+	struct pci_dev *ch0mm;	/* device 21 func 0 */
+	struct pci_dev *ch1mm;	/* device 22 func 0 */
+};
+
+/* map a rank/ctlr to a slot number on the mainboard */
+static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
+			      int ctlr, int rank)
+{
+	const struct i5100_priv *priv = mci->pvt_info;
+	int i;
+
+	for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+		int j;
+		const int numrank = priv->dimm_numrank[ctlr][i];
+
+		for (j = 0; j < numrank; j++)
+			if (priv->dimm_csmap[i][j] == rank)
+				return i * 2 + ctlr;
+	}
+
+	return -1;
+}
+
+static const char *i5100_err_msg(unsigned err)
+{
+	static const char *merrs[] = {
+		"unknown", /* 0 */
+		"uncorrectable data ECC on replay", /* 1 */
+		"unknown", /* 2 */
+		"unknown", /* 3 */
+		"aliased uncorrectable demand data ECC", /* 4 */
+		"aliased uncorrectable spare-copy data ECC", /* 5 */
+		"aliased uncorrectable patrol data ECC", /* 6 */
+		"unknown", /* 7 */
+		"unknown", /* 8 */
+		"unknown", /* 9 */
+		"non-aliased uncorrectable demand data ECC", /* 10 */
+		"non-aliased uncorrectable spare-copy data ECC", /* 11 */
+		"non-aliased uncorrectable patrol data ECC", /* 12 */
+		"unknown", /* 13 */
+		"correctable demand data ECC", /* 14 */
+		"correctable spare-copy data ECC", /* 15 */
+		"correctable patrol data ECC", /* 16 */
+		"unknown", /* 17 */
+		"SPD protocol error", /* 18 */
+		"unknown", /* 19 */
+		"spare copy initiated", /* 20 */
+		"spare copy completed", /* 21 */
+	};
+	unsigned i;
+
+	for (i = 0; i < ARRAY_SIZE(merrs); i++)
+		if (1 << i & err)
+			return merrs[i];
+
+	return "none";
+}
+
+/* convert csrow index into a rank (per controller -- 0..5) */
+static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
+{
+	const struct i5100_priv *priv = mci->pvt_info;
+
+	return csrow % priv->ranksperctlr;
+}
+
+/* convert csrow index into a controller (0..1) */
+static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
+{
+	const struct i5100_priv *priv = mci->pvt_info;
+
+	return csrow / priv->ranksperctlr;
+}
+
+static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
+				    int ctlr, int rank)
+{
+	const struct i5100_priv *priv = mci->pvt_info;
+
+	return ctlr * priv->ranksperctlr + rank;
+}
+
+static void i5100_handle_ce(struct mem_ctl_info *mci,
+			    int ctlr,
+			    unsigned bank,
+			    unsigned rank,
+			    unsigned long syndrome,
+			    unsigned cas,
+			    unsigned ras,
+			    const char *msg)
+{
+	const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+
+	printk(KERN_ERR
+		"CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+		"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
+		ctlr, bank, rank, syndrome, cas, ras,
+		csrow, mci->csrows[csrow].channels[0].label, msg);
+
+	mci->ce_count++;
+	mci->csrows[csrow].ce_count++;
+	mci->csrows[csrow].channels[0].ce_count++;
+}
+
+static void i5100_handle_ue(struct mem_ctl_info *mci,
+			    int ctlr,
+			    unsigned bank,
+			    unsigned rank,
+			    unsigned long syndrome,
+			    unsigned cas,
+			    unsigned ras,
+			    const char *msg)
+{
+	const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+
+	printk(KERN_ERR
+		"UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+		"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
+		ctlr, bank, rank, syndrome, cas, ras,
+		csrow, mci->csrows[csrow].channels[0].label, msg);
+
+	mci->ue_count++;
+	mci->csrows[csrow].ue_count++;
+}
+
+static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
+			   u32 ferr, u32 nerr)
+{
+	struct i5100_priv *priv = mci->pvt_info;
+	struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
+	u32 dw;
+	u32 dw2;
+	unsigned syndrome = 0;
+	unsigned ecc_loc = 0;
+	unsigned merr;
+	unsigned bank;
+	unsigned rank;
+	unsigned cas;
+	unsigned ras;
+
+	pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);
+
+	if (i5100_validlog_redmemvalid(dw)) {
+		pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
+		syndrome = dw2;
+		pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
+		ecc_loc = i5100_redmemb_ecc_locator(dw2);
+	}
+
+	if (i5100_validlog_recmemvalid(dw)) {
+		const char *msg;
+
+		pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
+		merr = i5100_recmema_merr(dw2);
+		bank = i5100_recmema_bank(dw2);
+		rank = i5100_recmema_rank(dw2);
+
+		pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
+		cas = i5100_recmemb_cas(dw2);
+		ras = i5100_recmemb_ras(dw2);
+
+		/* FIXME:  not really sure if this is what merr is...
+		 */
+		if (!merr)
+			msg = i5100_err_msg(ferr);
+		else
+			msg = i5100_err_msg(nerr);
+
+		i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+	}
+
+	if (i5100_validlog_nrecmemvalid(dw)) {
+		const char *msg;
+
+		pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
+		merr = i5100_nrecmema_merr(dw2);
+		bank = i5100_nrecmema_bank(dw2);
+		rank = i5100_nrecmema_rank(dw2);
+
+		pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
+		cas = i5100_nrecmemb_cas(dw2);
+		ras = i5100_nrecmemb_ras(dw2);
+
+		/* FIXME:  not really sure if this is what merr is...
+		 */
+		if (!merr)
+			msg = i5100_err_msg(ferr);
+		else
+			msg = i5100_err_msg(nerr);
+
+		i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+	}
+
+	pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
+}
+
+static void i5100_check_error(struct mem_ctl_info *mci)
+{
+	struct i5100_priv *priv = mci->pvt_info;
+	u32 dw;
+
+
+	pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
+	if (i5100_ferr_nf_mem_any(dw)) {
+		u32 dw2;
+
+		pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
+		if (dw2)
+			pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM,
+					       dw2);
+		pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);
+
+		i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
+			       i5100_ferr_nf_mem_any(dw),
+			       i5100_nerr_nf_mem_any(dw2));
+	}
+}
+
+static struct pci_dev *pci_get_device_func(unsigned vendor,
+					   unsigned device,
+					   unsigned func)
+{
+	struct pci_dev *ret = NULL;
+
+	while (1) {
+		ret = pci_get_device(vendor, device, ret);
+
+		if (!ret)
+			break;
+
+		if (PCI_FUNC(ret->devfn) == func)
+			break;
+	}
+
+	return ret;
+}
+
+static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
+					    int csrow)
+{
+	struct i5100_priv *priv = mci->pvt_info;
+	const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
+	const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
+	unsigned addr_lines;
+
+	/* dimm present? */
+	if (!priv->mtr[ctlr][ctlr_rank].present)
+		return 0ULL;
+
+	addr_lines =
+		I5100_DIMM_ADDR_LINES +
+		priv->mtr[ctlr][ctlr_rank].numcol +
+		priv->mtr[ctlr][ctlr_rank].numrow +
+		priv->mtr[ctlr][ctlr_rank].numbank;
+
+	return (unsigned long)
+		((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
+}
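+
+/*
+ * For example, a present rank with numbank = 2, numrow = 13 and numcol = 10
+ * has 3 + 2 + 13 + 10 = 28 address lines, i.e. 2^28 bytes (256 MiB), which
+ * is 65536 pages with a 4 KiB PAGE_SIZE.
+ */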
+
+static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
+{
+	struct i5100_priv *priv = mci->pvt_info;
+	struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
+	int i;
+
+	for (i = 0; i < I5100_MAX_CTLRS; i++) {
+		int j;
+		struct pci_dev *pdev = mms[i];
+
+		for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
+			const unsigned addr =
+				(j < 4) ? I5100_MTR_0 + j * 2 :
+					  I5100_MTR_4 + (j - 4) * 2;
+			u16 w;
+
+			pci_read_config_word(pdev, addr, &w);
+
+			priv->mtr[i][j].present = i5100_mtr_present(w);
+			priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
+			priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
+			priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
+			priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
+			priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
+		}
+	}
+}
+
+/*
+ * FIXME: make this into a real i2c adapter (so that dimm-decode
+ * will work)?
+ */
+static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
+			       u8 ch, u8 slot, u8 addr, u8 *byte)
+{
+	struct i5100_priv *priv = mci->pvt_info;
+	u16 w;
+	unsigned long et;
+
+	pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
+	if (i5100_spddata_busy(w))
+		return -1;
+
+	pci_write_config_dword(priv->mc, I5100_SPDCMD,
+			       i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
+						   0, 0));
+
+	/* wait up to 100ms for the busy bit to clear */
+	et = jiffies + HZ / 10;
+	udelay(100);
+	while (1) {
+		pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
+		if (!i5100_spddata_busy(w))
+			break;
+		/* honor the 100ms deadline computed above */
+		if (time_after(jiffies, et))
+			return -1;
+		udelay(100);
+	}
+
+	if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
+		return -1;
+
+	*byte = i5100_spddata_data(w);
+
+	return 0;
+}
+
+/*
+ * fill dimm chip select map
+ *
+ * FIXME:
+ *   o only valid for 4 ranks per controller
+ *   o not the only way to map chip selects to dimm slots
+ *   o investigate if there is some way to obtain this map from the bios
+ */
+static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
+{
+	struct i5100_priv *priv = mci->pvt_info;
+	int i;
+
+	WARN_ON(priv->ranksperctlr != 4);
+
+	for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+		int j;
+
+		for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
+			priv->dimm_csmap[i][j] = -1; /* default NC */
+	}
+
+	/* only 2 chip selects per slot... */
+	priv->dimm_csmap[0][0] = 0;
+	priv->dimm_csmap[0][1] = 3;
+	priv->dimm_csmap[1][0] = 1;
+	priv->dimm_csmap[1][1] = 2;
+	priv->dimm_csmap[2][0] = 2;
+	priv->dimm_csmap[3][0] = 3;
+}
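+
+/*
+ * Note the non-uniqueness mentioned above: in this map rank 2 is both the
+ * second chip select of slot 1 and the first chip select of slot 2, and
+ * i5100_rank_to_slot() simply returns the first match it finds.
+ */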
+
+static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
+					     struct mem_ctl_info *mci)
+{
+	struct i5100_priv *priv = mci->pvt_info;
+	int i;
+
+	for (i = 0; i < I5100_MAX_CTLRS; i++) {
+		int j;
+
+		for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
+			u8 rank;
+
+			if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
+				priv->dimm_numrank[i][j] = 0;
+			else
+				priv->dimm_numrank[i][j] = (rank & 3) + 1;
+		}
+	}
+
+	i5100_init_dimm_csmap(mci);
+}
+
+static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
+					      struct mem_ctl_info *mci)
+{
+	u16 w;
+	u32 dw;
+	struct i5100_priv *priv = mci->pvt_info;
+	struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
+	int i;
+
+	pci_read_config_word(pdev, I5100_TOLM, &w);
+	priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;
+
+	pci_read_config_word(pdev, I5100_MIR0, &w);
+	priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
+	priv->mir[0].way[1] = i5100_mir_way1(w);
+	priv->mir[0].way[0] = i5100_mir_way0(w);
+
+	pci_read_config_word(pdev, I5100_MIR1, &w);
+	priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
+	priv->mir[1].way[1] = i5100_mir_way1(w);
+	priv->mir[1].way[0] = i5100_mir_way0(w);
+
+	pci_read_config_word(pdev, I5100_AMIR_0, &w);
+	priv->amir[0] = w;
+	pci_read_config_word(pdev, I5100_AMIR_1, &w);
+	priv->amir[1] = w;
+
+	for (i = 0; i < I5100_MAX_CTLRS; i++) {
+		int j;
+
+		for (j = 0; j < I5100_MAX_DMIRS; j++) {
+			int k;
+
+			pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);
+
+			priv->dmir[i][j].limit =
+				(u64) i5100_dmir_limit(dw) << 28;
+			for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
+				priv->dmir[i][j].rank[k] =
+					i5100_dmir_rank(dw, k);
+		}
+	}
+
+	i5100_init_mtr(mci);
+}
+
+static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
+{
+	int i;
+	unsigned long total_pages = 0UL;
+	struct i5100_priv *priv = mci->pvt_info;
+
+	for (i = 0; i < mci->nr_csrows; i++) {
+		const unsigned long npages = i5100_npages(mci, i);
+		const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
+		const unsigned rank = i5100_csrow_to_rank(mci, i);
+
+		if (!npages)
+			continue;
+
+		/*
+		 * FIXME: these two are totally bogus -- I don't see how to
+		 * map them correctly to this structure...
+		 */
+		mci->csrows[i].first_page = total_pages;
+		mci->csrows[i].last_page = total_pages + npages - 1;
+		mci->csrows[i].page_mask = 0UL;
+
+		mci->csrows[i].nr_pages = npages;
+		mci->csrows[i].grain = 32;
+		mci->csrows[i].csrow_idx = i;
+		mci->csrows[i].dtype =
+			(priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
+		mci->csrows[i].ue_count = 0;
+		mci->csrows[i].ce_count = 0;
+		mci->csrows[i].mtype = MEM_RDDR2;
+		mci->csrows[i].edac_mode = EDAC_SECDED;
+		mci->csrows[i].mci = mci;
+		mci->csrows[i].nr_channels = 1;
+		mci->csrows[i].channels[0].chan_idx = 0;
+		mci->csrows[i].channels[0].ce_count = 0;
+		mci->csrows[i].channels[0].csrow = mci->csrows + i;
+		snprintf(mci->csrows[i].channels[0].label,
+			 sizeof(mci->csrows[i].channels[0].label),
+			 "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));
+
+		total_pages += npages;
+	}
+}
+
+static int __devinit i5100_init_one(struct pci_dev *pdev,
+				    const struct pci_device_id *id)
+{
+	int rc;
+	struct mem_ctl_info *mci;
+	struct i5100_priv *priv;
+	struct pci_dev *ch0mm, *ch1mm;
+	int ret = 0;
+	u32 dw;
+	int ranksperch;
+
+	if (PCI_FUNC(pdev->devfn) != 1)
+		return -ENODEV;
+
+	rc = pci_enable_device(pdev);
+	if (rc < 0) {
+		ret = rc;
+		goto bail;
+	}
+
+	/* ECC enabled? */
+	pci_read_config_dword(pdev, I5100_MC, &dw);
+	if (!i5100_mc_errdeten(dw)) {
+		printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
+		ret = -ENODEV;
+		goto bail_pdev;
+	}
+
+	/* figure out how many ranks, from strapped state of 48GB_Mode input */
+	pci_read_config_dword(pdev, I5100_MS, &dw);
+	ranksperch = !!(dw & (1 << 8)) * 2 + 4;
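+	/* 48GB_Mode strap (bit 8) set: 6 ranks per controller; clear: 4 */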
+
+	if (ranksperch != 4) {
+		/* FIXME: get 6 ranks / controller to work - need hw... */
+		printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
+		ret = -ENODEV;
+		goto bail_pdev;
+	}
+
+	/* enable error reporting... */
+	pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
+	dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
+	pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);
+
+	/* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
+	ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
+				    PCI_DEVICE_ID_INTEL_5100_21, 0);
+	if (!ch0mm) {
+		ret = -ENODEV;
+		goto bail_pdev;
+	}
+
+	rc = pci_enable_device(ch0mm);
+	if (rc < 0) {
+		ret = rc;
+		goto bail_ch0;
+	}
+
+	/* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
+	ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
+				    PCI_DEVICE_ID_INTEL_5100_22, 0);
+	if (!ch1mm) {
+		ret = -ENODEV;
+		goto bail_disable_ch0;
+	}
+
+	rc = pci_enable_device(ch1mm);
+	if (rc < 0) {
+		ret = rc;
+		goto bail_ch1;
+	}
+
+	mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
+	if (!mci) {
+		ret = -ENOMEM;
+		goto bail_disable_ch1;
+	}
+
+	mci->dev = &pdev->dev;
+
+	priv = mci->pvt_info;
+	priv->ranksperctlr = ranksperch;
+	priv->mc = pdev;
+	priv->ch0mm = ch0mm;
+	priv->ch1mm = ch1mm;
+
+	i5100_init_dimm_layout(pdev, mci);
+	i5100_init_interleaving(pdev, mci);
+
+	mci->mtype_cap = MEM_FLAG_FB_DDR2;
+	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_SECDED;
+	mci->mod_name = "i5100_edac.c";
+	mci->mod_ver = "not versioned";
+	mci->ctl_name = "i5100";
+	mci->dev_name = pci_name(pdev);
+	mci->ctl_page_to_phys = NULL;
+
+	mci->edac_check = i5100_check_error;
+
+	i5100_init_csrows(mci);
+
+	/* validate edac_op_state; anything other than POLL or NMI falls back to POLL */
+	switch (edac_op_state) {
+	case EDAC_OPSTATE_POLL:
+	case EDAC_OPSTATE_NMI:
+		break;
+	default:
+		edac_op_state = EDAC_OPSTATE_POLL;
+		break;
+	}
+
+	if (edac_mc_add_mc(mci)) {
+		ret = -ENODEV;
+		goto bail_mc;
+	}
+
+	return ret;
+
+bail_mc:
+	edac_mc_free(mci);
+
+bail_disable_ch1:
+	pci_disable_device(ch1mm);
+
+bail_ch1:
+	pci_dev_put(ch1mm);
+
+bail_disable_ch0:
+	pci_disable_device(ch0mm);
+
+bail_ch0:
+	pci_dev_put(ch0mm);
+
+bail_pdev:
+	pci_disable_device(pdev);
+
+bail:
+	return ret;
+}
+
+static void __devexit i5100_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+	struct i5100_priv *priv;
+
+	mci = edac_mc_del_mc(&pdev->dev);
+
+	if (!mci)
+		return;
+
+	priv = mci->pvt_info;
+	pci_disable_device(pdev);
+	pci_disable_device(priv->ch0mm);
+	pci_disable_device(priv->ch1mm);
+	pci_dev_put(priv->ch0mm);
+	pci_dev_put(priv->ch1mm);
+
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
+	/* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
+
+static struct pci_driver i5100_driver = {
+	.name = KBUILD_BASENAME,
+	.probe = i5100_init_one,
+	.remove = __devexit_p(i5100_remove_one),
+	.id_table = i5100_pci_tbl,
+};
+
+static int __init i5100_init(void)
+{
+	int pci_rc;
+
+	pci_rc = pci_register_driver(&i5100_driver);
+
+	return (pci_rc < 0) ? pci_rc : 0;
+}
+
+static void __exit i5100_exit(void)
+{
+	pci_unregister_driver(&i5100_driver);
+}
+
+module_init(i5100_init);
+module_exit(i5100_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR
+    ("Arthur Jones <ajones@riverbed.com>");
+MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
new file mode 100644
index 0000000..f99d106
--- /dev/null
+++ b/drivers/edac/i5400_edac.c
@@ -0,0 +1,1443 @@
+/*
+ * Intel 5400 class Memory Controllers kernel module (Seaburg)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Copyright (c) 2008 by:
+ *	 Ben Woodard <woodard@redhat.com>
+ *	 Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ *
+ * Forked and adapted from the i5000_edac driver which was
+ * written by Douglas Thompson Linux Networx <norsk5@xmission.com>
+ *
+ * This module is based on the following document:
+ *
+ * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
+ * 	http://developer.intel.com/design/chipsets/datashts/313070.htm
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+
+#include "edac_core.h"
+
+/*
+ * Alter this version for the I5400 module when modifications are made
+ */
+#define I5400_REVISION    " Ver: 1.0.0 " __DATE__
+
+#define EDAC_MOD_STR      "i5400_edac"
+
+#define i5400_printk(level, fmt, arg...) \
+	edac_printk(level, "i5400", fmt, ##arg)
+
+#define i5400_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
+
+/* Limits for i5400 */
+#define NUM_MTRS_PER_BRANCH	4
+#define CHANNELS_PER_BRANCH	2
+#define MAX_DIMMS_PER_CHANNEL	NUM_MTRS_PER_BRANCH
+#define	MAX_CHANNELS		4
+/* max possible csrows per channel */
+#define MAX_CSROWS		(MAX_DIMMS_PER_CHANNEL)
+
+/* Device 16,
+ * Function 0: System Address
+ * Function 1: Memory Branch Map, Control, Errors Register
+ * Function 2: FSB Error Registers
+ *
+ * All 3 functions of Device 16 (0,1,2) share the SAME DID and
+ * uses PCI_DEVICE_ID_INTEL_5400_ERR for device 16 (0,1,2),
+ * PCI_DEVICE_ID_INTEL_5400_FBD0 and PCI_DEVICE_ID_INTEL_5400_FBD1
+ * for device 21 (0,1).
+ */
+
+	/* OFFSETS for Function 0 */
+#define		AMBASE			0x48 /* AMB Mem Mapped Reg Region Base */
+#define		MAXCH			0x56 /* Max Channel Number */
+#define		MAXDIMMPERCH		0x57 /* Max DIMM PER Channel Number */
+
+	/* OFFSETS for Function 1 */
+#define		TOLM			0x6C
+#define		REDMEMB			0x7C
+#define			REC_ECC_LOCATOR_ODD(x)	((x) & 0x3fe00) /* bits [17:9] indicate ODD, [8:0]  indicate EVEN */
+#define		MIR0			0x80
+#define		MIR1			0x84
+#define		AMIR0			0x8c
+#define		AMIR1			0x90
+
+	/* Fatal error registers */
+#define		FERR_FAT_FBD		0x98	/* called FERR_FAT_FB_DIMM in the datasheet */
+#define			FERR_FAT_FBDCHAN (3<<28)	/* channel index where the highest-order error occurred */
+
+#define		NERR_FAT_FBD		0x9c
+#define		FERR_NF_FBD		0xa0	/* called FERR_NFAT_FB_DIMM in the datasheet */
+
+	/* Non-fatal error register */
+#define		NERR_NF_FBD		0xa4
+
+	/* Enable error mask */
+#define		EMASK_FBD		0xa8
+
+#define		ERR0_FBD		0xac
+#define		ERR1_FBD		0xb0
+#define		ERR2_FBD		0xb4
+#define		MCERR_FBD		0xb8
+
+	/* No OFFSETS for Device 16 Function 2 */
+
+/*
+ * Device 21,
+ * Function 0: Memory Map Branch 0
+ *
+ * Device 22,
+ * Function 0: Memory Map Branch 1
+ */
+
+	/* OFFSETS for Function 0 */
+#define AMBPRESENT_0	0x64
+#define AMBPRESENT_1	0x66
+#define MTR0		0x80
+#define MTR1		0x82
+#define MTR2		0x84
+#define MTR3		0x86
+
+	/* OFFSETS for Function 1 */
+#define NRECFGLOG		0x74
+#define RECFGLOG		0x78
+#define NRECMEMA		0xbe
+#define NRECMEMB		0xc0
+#define NRECFB_DIMMA		0xc4
+#define NRECFB_DIMMB		0xc8
+#define NRECFB_DIMMC		0xcc
+#define NRECFB_DIMMD		0xd0
+#define NRECFB_DIMME		0xd4
+#define NRECFB_DIMMF		0xd8
+#define REDMEMA			0xdC
+#define RECMEMA			0xf0
+#define RECMEMB			0xf4
+#define RECFB_DIMMA		0xf8
+#define RECFB_DIMMB		0xec
+#define RECFB_DIMMC		0xf0
+#define RECFB_DIMMD		0xf4
+#define RECFB_DIMME		0xf8
+#define RECFB_DIMMF		0xfC
+
+/*
+ * Error indicator bits and masks
+ * Error masks follow Table 5-17 of the i5400 datasheet
+ */
+
+enum error_mask {
+	EMASK_M1  = 1<<0,  /* Memory Write error on non-redundant retry */
+	EMASK_M2  = 1<<1,  /* Memory or FB-DIMM configuration CRC read error */
+	EMASK_M3  = 1<<2,  /* Reserved */
+	EMASK_M4  = 1<<3,  /* Uncorrectable Data ECC on Replay */
+	EMASK_M5  = 1<<4,  /* Aliased Uncorrectable Non-Mirrored Demand Data ECC */
+	EMASK_M6  = 1<<5,  /* Unsupported on i5400 */
+	EMASK_M7  = 1<<6,  /* Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
+	EMASK_M8  = 1<<7,  /* Aliased Uncorrectable Patrol Data ECC */
+	EMASK_M9  = 1<<8,  /* Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC */
+	EMASK_M10 = 1<<9,  /* Unsupported on i5400 */
+	EMASK_M11 = 1<<10, /* Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC  */
+	EMASK_M12 = 1<<11, /* Non-Aliased Uncorrectable Patrol Data ECC */
+	EMASK_M13 = 1<<12, /* Memory Write error on first attempt */
+	EMASK_M14 = 1<<13, /* FB-DIMM Configuration Write error on first attempt */
+	EMASK_M15 = 1<<14, /* Memory or FB-DIMM configuration CRC read error */
+	EMASK_M16 = 1<<15, /* Channel Failed-Over Occurred */
+	EMASK_M17 = 1<<16, /* Correctable Non-Mirrored Demand Data ECC */
+	EMASK_M18 = 1<<17, /* Unsupported on i5400 */
+	EMASK_M19 = 1<<18, /* Correctable Resilver- or Spare-Copy Data ECC */
+	EMASK_M20 = 1<<19, /* Correctable Patrol Data ECC */
+	EMASK_M21 = 1<<20, /* FB-DIMM Northbound parity error on FB-DIMM Sync Status */
+	EMASK_M22 = 1<<21, /* SPD protocol Error */
+	EMASK_M23 = 1<<22, /* Non-Redundant Fast Reset Timeout */
+	EMASK_M24 = 1<<23, /* Refresh error */
+	EMASK_M25 = 1<<24, /* Memory Write error on redundant retry */
+	EMASK_M26 = 1<<25, /* Redundant Fast Reset Timeout */
+	EMASK_M27 = 1<<26, /* Correctable Counter Threshold Exceeded */
+	EMASK_M28 = 1<<27, /* DIMM-Spare Copy Completed */
+	EMASK_M29 = 1<<28, /* DIMM-Isolation Completed */
+};
+
+/*
+ * Names to translate bit error into something useful
+ */
+static const char *error_name[] = {
+	[0]  = "Memory Write error on non-redundant retry",
+	[1]  = "Memory or FB-DIMM configuration CRC read error",
+	/* Reserved */
+	[3]  = "Uncorrectable Data ECC on Replay",
+	[4]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
+	/* M6 Unsupported on i5400 */
+	[6]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
+	[7]  = "Aliased Uncorrectable Patrol Data ECC",
+	[8]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
+	/* M10 Unsupported on i5400 */
+	[10] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
+	[11] = "Non-Aliased Uncorrectable Patrol Data ECC",
+	[12] = "Memory Write error on first attempt",
+	[13] = "FB-DIMM Configuration Write error on first attempt",
+	[14] = "Memory or FB-DIMM configuration CRC read error",
+	[15] = "Channel Failed-Over Occurred",
+	[16] = "Correctable Non-Mirrored Demand Data ECC",
+	/* M18 Unsupported on i5400 */
+	[18] = "Correctable Resilver- or Spare-Copy Data ECC",
+	[19] = "Correctable Patrol Data ECC",
+	[20] = "FB-DIMM Northbound parity error on FB-DIMM Sync Status",
+	[21] = "SPD protocol Error",
+	[22] = "Non-Redundant Fast Reset Timeout",
+	[23] = "Refresh error",
+	[24] = "Memory Write error on redundant retry",
+	[25] = "Redundant Fast Reset Timeout",
+	[26] = "Correctable Counter Threshold Exceeded",
+	[27] = "DIMM-Spare Copy Completed",
+	[28] = "DIMM-Isolation Completed",
+};
+
+/* Fatal errors */
+#define ERROR_FAT_MASK		(EMASK_M1 | \
+				 EMASK_M2 | \
+				 EMASK_M23)
+
+/* Correctable errors */
+#define ERROR_NF_CORRECTABLE	(EMASK_M27 | \
+				 EMASK_M20 | \
+				 EMASK_M19 | \
+				 EMASK_M18 | \
+				 EMASK_M17 | \
+				 EMASK_M16)
+#define ERROR_NF_DIMM_SPARE	(EMASK_M29 | \
+				 EMASK_M28)
+#define ERROR_NF_SPD_PROTOCOL	(EMASK_M22)
+#define ERROR_NF_NORTH_CRC	(EMASK_M21)
+
+/* Recoverable errors */
+#define ERROR_NF_RECOVERABLE	(EMASK_M26 | \
+				 EMASK_M25 | \
+				 EMASK_M24 | \
+				 EMASK_M15 | \
+				 EMASK_M14 | \
+				 EMASK_M13 | \
+				 EMASK_M12 | \
+				 EMASK_M11 | \
+				 EMASK_M9  | \
+				 EMASK_M8  | \
+				 EMASK_M7  | \
+				 EMASK_M5)
+
+/* uncorrectable errors */
+#define ERROR_NF_UNCORRECTABLE	(EMASK_M4)
+
+/* mask to all non-fatal errors */
+#define ERROR_NF_MASK		(ERROR_NF_CORRECTABLE   | \
+				 ERROR_NF_UNCORRECTABLE | \
+				 ERROR_NF_RECOVERABLE   | \
+				 ERROR_NF_DIMM_SPARE    | \
+				 ERROR_NF_SPD_PROTOCOL  | \
+				 ERROR_NF_NORTH_CRC)
+
+/*
+ * Define error masks for the several registers
+ */
+
+/* Enable all fatal and non fatal errors */
+#define ENABLE_EMASK_ALL	(ERROR_FAT_MASK | ERROR_NF_MASK)
+
+/* mask for fatal error registers */
+#define FERR_FAT_MASK ERROR_FAT_MASK
+
+/* masks for non-fatal error register */
+static inline int to_nf_mask(unsigned int mask)
+{
+	return (mask & EMASK_M29) | (mask >> 3);
+};
+
+static inline int from_nf_ferr(unsigned int mask)
+{
+	return (mask & EMASK_M29) |		/* Bit 28 */
+	       ((mask & ((1 << 28) - 1)) << 3);	/* Bits 0 to 27 */
+};
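+
+/*
+ * Example of the mapping performed by these helpers: EMASK_M4 (bit 3)
+ * sits at bit 0 of the FERR_NF_FBD layout, so to_nf_mask(EMASK_M4) == 1
+ * and from_nf_ferr(1) recovers the EMASK_M4 position; M29 stays at
+ * bit 28 in both layouts.
+ */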
+
+#define FERR_NF_MASK		to_nf_mask(ERROR_NF_MASK)
+#define FERR_NF_CORRECTABLE	to_nf_mask(ERROR_NF_CORRECTABLE)
+#define FERR_NF_DIMM_SPARE	to_nf_mask(ERROR_NF_DIMM_SPARE)
+#define FERR_NF_SPD_PROTOCOL	to_nf_mask(ERROR_NF_SPD_PROTOCOL)
+#define FERR_NF_NORTH_CRC	to_nf_mask(ERROR_NF_NORTH_CRC)
+#define FERR_NF_RECOVERABLE	to_nf_mask(ERROR_NF_RECOVERABLE)
+#define FERR_NF_UNCORRECTABLE	to_nf_mask(ERROR_NF_UNCORRECTABLE)
+
+/* Defines to extract the various fields from the
+ *	MTRx - Memory Technology Registers
+ */
+#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 10))
+#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 9))
+#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 8)) ? 8 : 4)
+#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
+#define MTR_DRAM_BANKS_ADDR_BITS(mtr)	((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
+#define MTR_DIMM_RANK(mtr)		(((mtr) >> 5) & 0x1)
+#define MTR_DIMM_RANK_ADDR_BITS(mtr)	(MTR_DIMM_RANK(mtr) ? 2 : 1)
+#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
+#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
+#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
+#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
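+
+/* For illustration: an MTR value of 0x0545 would decode as DIMM present,
+ * no electrical throttling, x8 width, 8 banks, single rank, 14 row bits
+ * and 11 column bits.
+ */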
+
+/* This applies to FERR_NF_FB-DIMM as well as FERR_FAT_FB-DIMM */
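+/* Bits 29:28 of those registers hold the index of the failing FBD channel */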
+static inline int extract_fbdchan_indx(u32 x)
+{
+	return (x>>28) & 0x3;
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+/* MTR NUMROW */
+static const char *numrow_toString[] = {
+	"8,192 - 13 rows",
+	"16,384 - 14 rows",
+	"32,768 - 15 rows",
+	"65,536 - 16 rows"
+};
+
+/* MTR NUMCOL */
+static const char *numcol_toString[] = {
+	"1,024 - 10 columns",
+	"2,048 - 11 columns",
+	"4,096 - 12 columns",
+	"reserved"
+};
+#endif
+
+/* Device name and register DID (Device ID) */
+struct i5400_dev_info {
+	const char *ctl_name;	/* name for this device */
+	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
+};
+
+/* Table of devices attributes supported by this driver */
+static const struct i5400_dev_info i5400_devs[] = {
+	{
+		.ctl_name = "I5400",
+		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_5400_ERR,
+	},
+};
+
+struct i5400_dimm_info {
+	int megabytes;		/* size, 0 means not present  */
+};
+
+/* driver private data structure */
+struct i5400_pvt {
+	struct pci_dev *system_address;		/* 16.0 */
+	struct pci_dev *branchmap_werrors;	/* 16.1 */
+	struct pci_dev *fsb_error_regs;		/* 16.2 */
+	struct pci_dev *branch_0;		/* 21.0 */
+	struct pci_dev *branch_1;		/* 22.0 */
+
+	u16 tolm;				/* top of low memory */
+	u64 ambase;				/* AMB BAR */
+
+	u16 mir0, mir1;
+
+	u16 b0_mtr[NUM_MTRS_PER_BRANCH];	/* Memory Technology Reg */
+	u16 b0_ambpresent0;			/* Branch 0, Channel 0 */
+	u16 b0_ambpresent1;			/* Branch 0, Channel 1 */
+
+	u16 b1_mtr[NUM_MTRS_PER_BRANCH];	/* Memory Technology Reg */
+	u16 b1_ambpresent0;			/* Branch 1, Channel 0 */
+	u16 b1_ambpresent1;			/* Branch 1, Channel 1 */
+
+	/* DIMM information matrix, allocating architecture maximums */
+	struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
+
+	/* Actual values for this controller */
+	int maxch;				/* Max channels */
+	int maxdimmperch;			/* Max DIMMs per channel */
+};
+
+/* I5400 MCH error information retrieved from Hardware */
+struct i5400_error_info {
+	/* These registers are always read from the MC */
+	u32 ferr_fat_fbd;	/* First Errors Fatal */
+	u32 nerr_fat_fbd;	/* Next Errors Fatal */
+	u32 ferr_nf_fbd;	/* First Errors Non-Fatal */
+	u32 nerr_nf_fbd;	/* Next Errors Non-Fatal */
+
+	/* These registers are input ONLY if there was a Recoverable Error */
+	u32 redmemb;		/* Recoverable Mem Data Error log B */
+	u16 recmema;		/* Recoverable Mem Error log A */
+	u32 recmemb;		/* Recoverable Mem Error log B */
+
+	/* These registers are input ONLY if there was a Non-Rec Error */
+	u16 nrecmema;		/* Non-Recoverable Mem log A */
+	u16 nrecmemb;		/* Non-Recoverable Mem log B */
+
+};
+
+/* Note that nrec_rdwr changed from NRECMEMA to NRECMEMB between the 5000
+ * and the 5400, so it is better to use an inline function than a macro here.
+ */
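+/* NRECMEMA field layout (per the accessors below): bank in bits 14:12,
+ * rank in bits 11:8, buffer id in bits 7:0.  NRECMEMB: rdwr in bit 31,
+ * cas in bits 28:16, ras in bits 15:0.
+ */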
+static inline int nrec_bank(struct i5400_error_info *info)
+{
+	return ((info->nrecmema) >> 12) & 0x7;
+}
+static inline int nrec_rank(struct i5400_error_info *info)
+{
+	return ((info->nrecmema) >> 8) & 0xf;
+}
+static inline int nrec_buf_id(struct i5400_error_info *info)
+{
+	return ((info->nrecmema)) & 0xff;
+}
+static inline int nrec_rdwr(struct i5400_error_info *info)
+{
+	return (info->nrecmemb) >> 31;
+}
+/* This applies to both NREC and REC string so it can be used with nrec_rdwr
+   and rec_rdwr */
+static inline const char *rdwr_str(int rdwr)
+{
+	return rdwr ? "Write" : "Read";
+}
+static inline int nrec_cas(struct i5400_error_info *info)
+{
+	return ((info->nrecmemb) >> 16) & 0x1fff;
+}
+static inline int nrec_ras(struct i5400_error_info *info)
+{
+	return (info->nrecmemb) & 0xffff;
+}
+static inline int rec_bank(struct i5400_error_info *info)
+{
+	return ((info->recmema) >> 12) & 0x7;
+}
+static inline int rec_rank(struct i5400_error_info *info)
+{
+	return ((info->recmema) >> 8) & 0xf;
+}
+static inline int rec_rdwr(struct i5400_error_info *info)
+{
+	return (info->recmemb) >> 31;
+}
+static inline int rec_cas(struct i5400_error_info *info)
+{
+	return ((info->recmemb) >> 16) & 0x1fff;
+}
+static inline int rec_ras(struct i5400_error_info *info)
+{
+	return (info->recmemb) & 0xffff;
+}
+
+static struct edac_pci_ctl_info *i5400_pci;
+
+/*
+ *	i5400_get_error_info	Retrieve the hardware error information from
+ *				the hardware and cache it in the 'info'
+ *				structure
+ */
+static void i5400_get_error_info(struct mem_ctl_info *mci,
+				 struct i5400_error_info *info)
+{
+	struct i5400_pvt *pvt;
+	u32 value;
+
+	pvt = mci->pvt_info;
+
+	/* read in the 1st FATAL error register */
+	pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
+
+	/* Mask only the bits that the doc says are valid
+	 */
+	value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
+
+	/* If there is an error, then read in the
+	   NEXT FATAL error register and the Memory Error Log Register A
+	 */
+	if (value & FERR_FAT_MASK) {
+		info->ferr_fat_fbd = value;
+
+		/* harvest the various error data we need */
+		pci_read_config_dword(pvt->branchmap_werrors,
+				NERR_FAT_FBD, &info->nerr_fat_fbd);
+		pci_read_config_word(pvt->branchmap_werrors,
+				NRECMEMA, &info->nrecmema);
+		pci_read_config_word(pvt->branchmap_werrors,
+				NRECMEMB, &info->nrecmemb);
+
+		/* Clear the error bits, by writing them back */
+		pci_write_config_dword(pvt->branchmap_werrors,
+				FERR_FAT_FBD, value);
+	} else {
+		info->ferr_fat_fbd = 0;
+		info->nerr_fat_fbd = 0;
+		info->nrecmema = 0;
+		info->nrecmemb = 0;
+	}
+
+	/* read in the 1st NON-FATAL error register */
+	pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
+
+	/* If there is an error, then read in the 1st NON-FATAL error
+	 * register as well */
+	if (value & FERR_NF_MASK) {
+		info->ferr_nf_fbd = value;
+
+		/* harvest the various error data we need */
+		pci_read_config_dword(pvt->branchmap_werrors,
+				NERR_NF_FBD, &info->nerr_nf_fbd);
+		pci_read_config_word(pvt->branchmap_werrors,
+				RECMEMA, &info->recmema);
+		pci_read_config_dword(pvt->branchmap_werrors,
+				RECMEMB, &info->recmemb);
+		pci_read_config_dword(pvt->branchmap_werrors,
+				REDMEMB, &info->redmemb);
+
+		/* Clear the error bits, by writing them back */
+		pci_write_config_dword(pvt->branchmap_werrors,
+				FERR_NF_FBD, value);
+	} else {
+		info->ferr_nf_fbd = 0;
+		info->nerr_nf_fbd = 0;
+		info->recmema = 0;
+		info->recmemb = 0;
+		info->redmemb = 0;
+	}
+}
+
+/*
+ * i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
+ * 					struct i5400_error_info *info,
+ * 					unsigned long allErrors);
+ *
+ *	handle the Intel FATAL and unrecoverable errors, if any
+ */
+static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
+				    struct i5400_error_info *info,
+				    unsigned long allErrors)
+{
+	char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
+	int branch;
+	int channel;
+	int bank;
+	int buf_id;
+	int rank;
+	int rdwr;
+	int ras, cas;
+	int errnum;
+	char *type = NULL;
+
+	if (!allErrors)
+		return;		/* if no error, return now */
+
+	if (allErrors & ERROR_FAT_MASK)
+		type = "FATAL";
+	else if (allErrors & FERR_NF_UNCORRECTABLE)
+		type = "NON-FATAL uncorrected";
+	else
+		type = "NON-FATAL recoverable";
+
+	/* ONLY ONE of the possible error bits will be set, as per the docs */
+
+	branch = extract_fbdchan_indx(info->ferr_fat_fbd);
+	channel = branch;
+
+	/* Use the NON-Recoverable macros to extract data */
+	bank = nrec_bank(info);
+	rank = nrec_rank(info);
+	buf_id = nrec_buf_id(info);
+	rdwr = nrec_rdwr(info);
+	ras = nrec_ras(info);
+	cas = nrec_cas(info);
+
+	debugf0("\t\tCSROW= %d  Channels= %d,%d  (Branch= %d "
+		"DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
+		rank, channel, channel + 1, branch >> 1, bank,
+		buf_id, rdwr_str(rdwr), ras, cas);
+
+	/* Only 1 bit will be on */
+	errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
+
+	/* Form out message */
+	snprintf(msg, sizeof(msg),
+		 "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s "
+		 "RAS=%d CAS=%d %s Err=0x%lx (%s))",
+		 type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
+		 type, allErrors, error_name[errnum]);
+
+	/* Call the helper to output message */
+	edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+}
+
+/*
+ * i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
+ * 				struct i5400_error_info *info);
+ *
+ *	handle the Intel NON-FATAL errors, if any
+ */
+static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
+					struct i5400_error_info *info)
+{
+	char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
+	unsigned long allErrors;
+	int branch;
+	int channel;
+	int bank;
+	int rank;
+	int rdwr;
+	int ras, cas;
+	int errnum;
+
+	/* mask off the Error bits that are possible */
+	allErrors = from_nf_ferr(info->ferr_nf_fbd & FERR_NF_MASK);
+	if (!allErrors)
+		return;		/* if no error, return now */
+
+	/* ONLY ONE of the possible error bits will be set, as per the docs */
+
+	if (allErrors & (ERROR_NF_UNCORRECTABLE | ERROR_NF_RECOVERABLE)) {
+		i5400_proccess_non_recoverable_info(mci, info, allErrors);
+		return;
+	}
+
+	/* Correctable errors */
+	if (allErrors & ERROR_NF_CORRECTABLE) {
+		debugf0("\tCorrected bits= 0x%lx\n", allErrors);
+
+		branch = extract_fbdchan_indx(info->ferr_nf_fbd);
+
+		channel = 0;
+		if (REC_ECC_LOCATOR_ODD(info->redmemb))
+			channel = 1;
+
+		/* Convert the within-branch channel (0/1) to a global
+		 * channel index by adding the branch's base channel */
+		channel += branch;
+
+		bank = rec_bank(info);
+		rank = rec_rank(info);
+		rdwr = rec_rdwr(info);
+		ras = rec_ras(info);
+		cas = rec_cas(info);
+
+		/* Only 1 bit will be on */
+		errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
+
+		debugf0("\t\tCSROW= %d Channel= %d  (Branch %d "
+			"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+			rank, channel, branch >> 1, bank,
+			rdwr_str(rdwr), ras, cas);
+
+		/* Form out message */
+		snprintf(msg, sizeof(msg),
+			 "Corrected error (Branch=%d DRAM-Bank=%d RDWR=%s "
+			 "RAS=%d CAS=%d, CE Err=0x%lx (%s))",
+			 branch >> 1, bank, rdwr_str(rdwr), ras, cas,
+			 allErrors, error_name[errnum]);
+
+		/* Call the helper to output message */
+		edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+
+		return;
+	}
+
+	/* Miscellaneous errors */
+	errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
+
+	branch = extract_fbdchan_indx(info->ferr_nf_fbd);
+
+	i5400_mc_printk(mci, KERN_EMERG,
+			"Non-Fatal misc error (Branch=%d Err=%#lx (%s))",
+			branch >> 1, allErrors, error_name[errnum]);
+}
+
+/*
+ *	i5400_process_error_info	Process the error info that is
+ *	in the 'info' structure, previously retrieved from hardware
+ */
+static void i5400_process_error_info(struct mem_ctl_info *mci,
+				struct i5400_error_info *info)
+{
+	u32 allErrors;
+
+	/* First handle any fatal errors that occurred */
+	allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
+	i5400_proccess_non_recoverable_info(mci, info, allErrors);
+
+	/* now handle any non-fatal errors that occurred */
+	i5400_process_nonfatal_error_info(mci, info);
+}
+
+/*
+ *	i5400_clear_error	Retrieve any error from the hardware
+ *				but do NOT process that error.
+ *				Used for clearing out previous errors.
+ *				Called by the Core module.
+ */
+static void i5400_clear_error(struct mem_ctl_info *mci)
+{
+	struct i5400_error_info info;
+
+	i5400_get_error_info(mci, &info);
+}
+
+/*
+ *	i5400_check_error	Retrieve and process errors reported by the
+ *				hardware. Called by the Core module.
+ */
+static void i5400_check_error(struct mem_ctl_info *mci)
+{
+	struct i5400_error_info info;
+	debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+	i5400_get_error_info(mci, &info);
+	i5400_process_error_info(mci, &info);
+}
+
+/*
+ *	i5400_put_devices	'put' all the devices that we have
+ *				reserved via 'get'
+ */
+static void i5400_put_devices(struct mem_ctl_info *mci)
+{
+	struct i5400_pvt *pvt;
+
+	pvt = mci->pvt_info;
+
+	/* Decrement usage count for devices */
+	pci_dev_put(pvt->branch_1);
+	pci_dev_put(pvt->branch_0);
+	pci_dev_put(pvt->fsb_error_regs);
+	pci_dev_put(pvt->branchmap_werrors);
+}
+
+/*
+ *	i5400_get_devices	Find and perform 'get' operation on the MCH's
+ *			device/functions we want to reference for this driver
+ *
+ *			Need to 'get' device 16 func 1 and func 2
+ */
+static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
+{
+	struct i5400_pvt *pvt;
+	struct pci_dev *pdev;
+
+	pvt = mci->pvt_info;
+	pvt->branchmap_werrors = NULL;
+	pvt->fsb_error_regs = NULL;
+	pvt->branch_0 = NULL;
+	pvt->branch_1 = NULL;
+
+	/* Attempt to 'get' the MCH register we want */
+	pdev = NULL;
+	while (!pvt->branchmap_werrors || !pvt->fsb_error_regs) {
+		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				      PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
+		if (!pdev) {
+			/* End of list, leave */
+			i5400_printk(KERN_ERR,
+				"'system address,Process Bus' "
+				"device not found:"
+				"vendor 0x%x device 0x%x ERR funcs "
+				"(broken BIOS?)\n",
+				PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_5400_ERR);
+			goto error;
+		}
+
+		/* Store device 16 funcs 1 and 2 */
+		switch (PCI_FUNC(pdev->devfn)) {
+		case 1:
+			pvt->branchmap_werrors = pdev;
+			break;
+		case 2:
+			pvt->fsb_error_regs = pdev;
+			break;
+		}
+	}
+
+	debugf1("System Address, processor bus- PCI Bus ID: %s  %x:%x\n",
+		pci_name(pvt->system_address),
+		pvt->system_address->vendor, pvt->system_address->device);
+	debugf1("Branchmap, control and errors - PCI Bus ID: %s  %x:%x\n",
+		pci_name(pvt->branchmap_werrors),
+		pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
+	debugf1("FSB Error Regs - PCI Bus ID: %s  %x:%x\n",
+		pci_name(pvt->fsb_error_regs),
+		pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
+
+	pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
+				       PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
+	if (!pvt->branch_0) {
+		i5400_printk(KERN_ERR,
+			"MC: 'BRANCH 0' device not found:"
+			"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
+			PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0);
+		goto error;
+	}
+
+	/* If this device claims to have at least 2 channels then
+	 * fetch Branch 1's information
+	 */
+	if (pvt->maxch < CHANNELS_PER_BRANCH)
+		return 0;
+
+	pvt->branch_1 = pci_get_device(PCI_VENDOR_ID_INTEL,
+				       PCI_DEVICE_ID_INTEL_5400_FBD1, NULL);
+	if (!pvt->branch_1) {
+		i5400_printk(KERN_ERR,
+			"MC: 'BRANCH 1' device not found:"
+			"vendor 0x%x device 0x%x Func 0 "
+			"(broken BIOS?)\n",
+			PCI_VENDOR_ID_INTEL,
+			PCI_DEVICE_ID_INTEL_5400_FBD1);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	i5400_put_devices(mci);
+	return -ENODEV;
+}
+
+/*
+ *	determine_amb_present_reg
+ *
+ *		the AMB-present information is spread across four registers,
+ *		one per channel; choosing the right one requires knowing
+ *		which channel is in question
+ *
+ *	2 branches, each with 2 channels
+ *		b0_ambpresent0 for channel '0'
+ *		b0_ambpresent1 for channel '1'
+ *		b1_ambpresent0 for channel '2'
+ *		b1_ambpresent1 for channel '3'
+ */
+static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
+{
+	int amb_present;
+
+	if (channel < CHANNELS_PER_BRANCH) {
+		if (channel & 0x1)
+			amb_present = pvt->b0_ambpresent1;
+		else
+			amb_present = pvt->b0_ambpresent0;
+	} else {
+		if (channel & 0x1)
+			amb_present = pvt->b1_ambpresent1;
+		else
+			amb_present = pvt->b1_ambpresent0;
+	}
+
+	return amb_present;
+}
+
+/*
+ * determine_mtr(pvt, csrow, channel)
+ *
+ * return the proper MTR register as determined by the csrow and desired channel
+ */
+static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
+{
+	int mtr;
+	int n;
+
+	/* There is one MTR for each slot pair of FB-DIMMs;
+	   each slot pair may be at branch 0 or branch 1.
+	 */
+	n = csrow;
+
+	if (n >= NUM_MTRS_PER_BRANCH) {
+		debugf0("ERROR: trying to access an invalid csrow: %d\n",
+			csrow);
+		return 0;
+	}
+
+	if (channel < CHANNELS_PER_BRANCH)
+		mtr = pvt->b0_mtr[n];
+	else
+		mtr = pvt->b1_mtr[n];
+
+	return mtr;
+}
+
+/*
+ *	decode_mtr	dump the fields of one MTR register (debug output)
+ */
+static void decode_mtr(int slot_row, u16 mtr)
+{
+	int ans;
+
+	ans = MTR_DIMMS_PRESENT(mtr);
+
+	debugf2("\tMTR%d=0x%x:  DIMMs are %s\n", slot_row, mtr,
+		ans ? "Present" : "NOT Present");
+	if (!ans)
+		return;
+
+	debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
+
+	debugf2("\t\tELECTRICAL THROTTLING is %s\n",
+		MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+
+	debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
+	debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
+	debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
+	debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
+}
+
+static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
+			struct i5400_dimm_info *dinfo)
+{
+	int mtr;
+	int amb_present_reg;
+	int addrBits;
+
+	mtr = determine_mtr(pvt, csrow, channel);
+	if (MTR_DIMMS_PRESENT(mtr)) {
+		amb_present_reg = determine_amb_present_reg(pvt, channel);
+
+		/* Determine if there is a DIMM present in this DIMM slot */
+		if (amb_present_reg & (1 << csrow)) {
+			/* Start with the number of bits for a Bank
+			 * on the DRAM */
+			addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+			/* Add the number of ROW bits */
+			addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+			/* add the number of COLUMN bits */
+			addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+			/* add the number of RANK bits */
+			addrBits += MTR_DIMM_RANK(mtr);
+
+			addrBits += 6;	/* each location holds 64 bits */
+			addrBits -= 20;	/* divide by 2^20 to get MiB */
+			addrBits -= 3;	/* 8 bits per byte */
+
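+			/* e.g. 3 bank + 14 row + 11 col bits on a single-rank
+			 * DIMM: 2^(28 + 6 - 20 - 3) = 2^11 MiB = 2 GiB */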
+			dinfo->megabytes = 1 << addrBits;
+		}
+	}
+}
+
+/*
+ *	calculate_dimm_size
+ *
+ *	also will output a DIMM matrix map, if debug is enabled, for viewing
+ *	how the DIMMs are populated
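+ *
+ *	e.g. (illustrative layout only):
+ *
+ *	csrow  1    2048 MB   | 2048 MB   |
+ *	csrow  0    2048 MB   | 2048 MB   |
+ *	-----------------------------------
+ *	            channel 0 | channel 1 |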
+ */
+static void calculate_dimm_size(struct i5400_pvt *pvt)
+{
+	struct i5400_dimm_info *dinfo;
+	int csrow, max_csrows;
+	char *p, *mem_buffer;
+	int space, n;
+	int channel;
+
+	/* ================= Generate some debug output ================= */
+	space = PAGE_SIZE;
+	mem_buffer = p = kmalloc(space, GFP_KERNEL);
+	if (p == NULL) {
+		i5400_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
+			__FILE__, __func__);
+		return;
+	}
+
+	/* Scan all the actual CSROWS
+	 * and calculate the information for each DIMM
+	 * Start with the highest csrow first, to display it first
+	 * and work toward the 0th csrow
+	 */
+	max_csrows = pvt->maxdimmperch;
+	for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+
+		/* on an odd csrow, first output a 'boundary' marker,
+		 * then reset the message buffer  */
+		if (csrow & 0x1) {
+			n = snprintf(p, space, "---------------------------"
+					"--------------------------------");
+			p += n;
+			space -= n;
+			debugf2("%s\n", mem_buffer);
+			p = mem_buffer;
+			space = PAGE_SIZE;
+		}
+		n = snprintf(p, space, "csrow %2d    ", csrow);
+		p += n;
+		space -= n;
+
+		for (channel = 0; channel < pvt->maxch; channel++) {
+			dinfo = &pvt->dimm_info[csrow][channel];
+			handle_channel(pvt, csrow, channel, dinfo);
+			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
+			p += n;
+			space -= n;
+		}
+		debugf2("%s\n", mem_buffer);
+		p = mem_buffer;
+		space = PAGE_SIZE;
+	}
+
+	/* Output the last bottom 'boundary' marker */
+	n = snprintf(p, space, "---------------------------"
+			"--------------------------------");
+	p += n;
+	space -= n;
+	debugf2("%s\n", mem_buffer);
+	p = mem_buffer;
+	space = PAGE_SIZE;
+
+	/* now output the 'channel' labels */
+	n = snprintf(p, space, "            ");
+	p += n;
+	space -= n;
+	for (channel = 0; channel < pvt->maxch; channel++) {
+		n = snprintf(p, space, "channel %d | ", channel);
+		p += n;
+		space -= n;
+	}
+
+	/* output the last message and free buffer */
+	debugf2("%s\n", mem_buffer);
+	kfree(mem_buffer);
+}
+
+/*
+ *	i5400_get_mc_regs	read in the necessary registers and
+ *				cache locally
+ *
+ *			Fills in the private data members
+ */
+static void i5400_get_mc_regs(struct mem_ctl_info *mci)
+{
+	struct i5400_pvt *pvt;
+	u32 actual_tolm;
+	u16 limit;
+	int slot_row;
+	int maxch;
+	int maxdimmperch;
+	int way0, way1;
+
+	pvt = mci->pvt_info;
+
+	pci_read_config_dword(pvt->system_address, AMBASE,
+			(u32 *) &pvt->ambase);
+	pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
+			((u32 *) &pvt->ambase) + 1);	/* upper 32 bits */
+
+	maxdimmperch = pvt->maxdimmperch;
+	maxch = pvt->maxch;
+
+	debugf2("AMBASE= 0x%lx  MAXCH= %d  MAX-DIMM-Per-CH= %d\n",
+		(long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
+
+	/* Get the Branch Map regs */
+	pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
+	pvt->tolm >>= 12;
+	debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
+		pvt->tolm);
+
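+	/* TOLM is in units of 256 MB (2^28 bytes), so (1000 * tolm) >> (30 - 28)
+	 * yields the limit in thousandths of a GB for the printout below.
+	 */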
+	actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
+	debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
+		actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
+
+	pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
+	pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
+
+	/* Get the MIR[0-1] regs */
+	limit = (pvt->mir0 >> 4) & 0x0fff;
+	way0 = pvt->mir0 & 0x1;
+	way1 = pvt->mir0 & 0x2;
+	debugf2("MIR0: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);
+	limit = (pvt->mir1 >> 4) & 0xfff;
+	way0 = pvt->mir1 & 0x1;
+	way1 = pvt->mir1 & 0x2;
+	debugf2("MIR1: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);
+
+	/* Get the set of MTR[0-3] regs by each branch */
+	for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
+		int where = MTR0 + (slot_row * sizeof(u16));
+
+		/* Branch 0 set of MTR registers */
+		pci_read_config_word(pvt->branch_0, where,
+				&pvt->b0_mtr[slot_row]);
+
+		debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
+			pvt->b0_mtr[slot_row]);
+
+		if (pvt->maxch < CHANNELS_PER_BRANCH) {
+			pvt->b1_mtr[slot_row] = 0;
+			continue;
+		}
+
+		/* Branch 1 set of MTR registers */
+		pci_read_config_word(pvt->branch_1, where,
+				&pvt->b1_mtr[slot_row]);
+		debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where,
+			pvt->b1_mtr[slot_row]);
+	}
+
+	/* Read and dump branch 0's MTRs */
+	debugf2("\nMemory Technology Registers:\n");
+	debugf2("   Branch 0:\n");
+	for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+		decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
+
+	pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
+			&pvt->b0_ambpresent0);
+	debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
+	pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
+			&pvt->b0_ambpresent1);
+	debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
+
+	/* Only if we have 2 branches (4 channels) */
+	if (pvt->maxch < CHANNELS_PER_BRANCH) {
+		pvt->b1_ambpresent0 = 0;
+		pvt->b1_ambpresent1 = 0;
+	} else {
+		/* Read and dump branch 1's MTRs */
+		debugf2("   Branch 1:\n");
+		for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+			decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
+
+		pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
+				&pvt->b1_ambpresent0);
+		debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
+			pvt->b1_ambpresent0);
+		pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
+				&pvt->b1_ambpresent1);
+		debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
+			pvt->b1_ambpresent1);
+	}
+
+	/* Go and determine the size of each DIMM and place in an
+	 * orderly matrix */
+	calculate_dimm_size(pvt);
+}
+
+/*
+ *	i5400_init_csrows	Initialize the 'csrows' table within
+ *				the mci control	structure with the
+ *				addressing of memory.
+ *
+ *	return:
+ *		0	success
+ *		1	no actual memory found on this MC
+ */
+static int i5400_init_csrows(struct mem_ctl_info *mci)
+{
+	struct i5400_pvt *pvt;
+	struct csrow_info *p_csrow;
+	int empty, channel_count;
+	int max_csrows;
+	int mtr;
+	int csrow_megs;
+	int channel;
+	int csrow;
+
+	pvt = mci->pvt_info;
+
+	channel_count = pvt->maxch;
+	max_csrows = pvt->maxdimmperch;
+
+	empty = 1;		/* Assume NO memory */
+
+	for (csrow = 0; csrow < max_csrows; csrow++) {
+		p_csrow = &mci->csrows[csrow];
+
+		p_csrow->csrow_idx = csrow;
+
+		/* use branch 0 for the basis */
+		mtr = determine_mtr(pvt, csrow, 0);
+
+		/* if no DIMMS on this row, continue */
+		if (!MTR_DIMMS_PRESENT(mtr))
+			continue;
+
+		/* FAKE OUT VALUES, FIXME */
+		p_csrow->first_page = 0 + csrow * 20;
+		p_csrow->last_page = 9 + csrow * 20;
+		p_csrow->page_mask = 0xFFF;
+
+		p_csrow->grain = 8;
+
+		csrow_megs = 0;
+		for (channel = 0; channel < pvt->maxch; channel++)
+			csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
+
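+		/* 1 MiB = 256 pages of 4 KiB each, hence the shift by 8 */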
+		p_csrow->nr_pages = csrow_megs << 8;
+
+		/* Assume DDR2 for now */
+		p_csrow->mtype = MEM_FB_DDR2;
+
+		/* ask what device type on this row */
+		if (MTR_DRAM_WIDTH(mtr) == 8)
+			p_csrow->dtype = DEV_X8;
+		else
+			p_csrow->dtype = DEV_X4;
+
+		p_csrow->edac_mode = EDAC_S8ECD8ED;
+
+		empty = 0;
+	}
+
+	return empty;
+}
+
+/*
+ *	i5400_enable_error_reporting
+ *			Turn on the memory reporting features of the hardware
+ */
+static void i5400_enable_error_reporting(struct mem_ctl_info *mci)
+{
+	struct i5400_pvt *pvt;
+	u32 fbd_error_mask;
+
+	pvt = mci->pvt_info;
+
+	/* Read the FBD Error Mask Register */
+	pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
+			&fbd_error_mask);
+
+	/* Enable with a '0' */
+	fbd_error_mask &= ~(ENABLE_EMASK_ALL);
+
+	pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
+			fbd_error_mask);
+}
+
+/*
+ *	i5400_probe1	Probe for ONE instance of device to see if it is
+ *			present.
+ *	return:
+ *		0 for FOUND a device
+ *		< 0 for error code
+ */
+static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	struct mem_ctl_info *mci;
+	struct i5400_pvt *pvt;
+	int num_channels;
+	int num_dimms_per_channel;
+	int num_csrows;
+
+	if (dev_idx >= ARRAY_SIZE(i5400_devs))
+		return -EINVAL;
+
+	debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+		__func__,
+		pdev->bus->number,
+		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+	/* We only are looking for func 0 of the set */
+	if (PCI_FUNC(pdev->devfn) != 0)
+		return -ENODEV;
+
+	/* As we don't have a motherboard identification routine to determine
+	 * the actual number of slots/dimms per channel, we thus utilize the
+	 * resource as specified by the chipset. Thus, we might have
+	 * more DIMMs per channel than are actually on the mobo, but this
+	 * allows the driver to support up to the chipset max, without
+	 * some fancy mobo determination.
+	 */
+	num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
+	num_channels = MAX_CHANNELS;
+	num_csrows = num_dimms_per_channel;
+
+	debugf0("MC: %s(): Number of - Channels= %d  DIMMS= %d  CSROWS= %d\n",
+		__func__, num_channels, num_dimms_per_channel, num_csrows);
+
+	/* allocate a new MC control structure */
+	mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
+
+	if (mci == NULL)
+		return -ENOMEM;
+
+	debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+	mci->dev = &pdev->dev;	/* record ptr  to the generic device */
+
+	pvt = mci->pvt_info;
+	pvt->system_address = pdev;	/* Record this device in our private */
+	pvt->maxch = num_channels;
+	pvt->maxdimmperch = num_dimms_per_channel;
+
+	/* 'get' the pci devices we want to reserve for our use */
+	if (i5400_get_devices(mci, dev_idx))
+		goto fail0;
+
+	/* Time to get serious */
+	i5400_get_mc_regs(mci);	/* retrieve the hardware registers */
+
+	mci->mc_idx = 0;
+	mci->mtype_cap = MEM_FLAG_FB_DDR2;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE;
+	mci->edac_cap = EDAC_FLAG_NONE;
+	mci->mod_name = "i5400_edac.c";
+	mci->mod_ver = I5400_REVISION;
+	mci->ctl_name = i5400_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->ctl_page_to_phys = NULL;
+
+	/* Set the function pointer to an actual operation function */
+	mci->edac_check = i5400_check_error;
+
+	/* initialize the MC control structure 'csrows' table
+	 * with the mapping and control information */
+	if (i5400_init_csrows(mci)) {
+		debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
+			"    because i5400_init_csrows() returned nonzero "
+			"value\n");
+		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
+	} else {
+		debugf1("MC: Enable error reporting now\n");
+		i5400_enable_error_reporting(mci);
+	}
+
+	/* add this new MC control structure to EDAC's list of MCs */
+	if (edac_mc_add_mc(mci)) {
+		debugf0("MC: " __FILE__
+			": %s(): failed edac_mc_add_mc()\n", __func__);
+		/* FIXME: perhaps some code should go here that disables error
+		 * reporting if we just enabled it
+		 */
+		goto fail1;
+	}
+
+	i5400_clear_error(mci);
+
+	/* allocating generic PCI control info */
+	i5400_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!i5400_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	return 0;
+
+	/* Error exit unwinding stack */
+fail1:
+
+	i5400_put_devices(mci);
+
+fail0:
+	edac_mc_free(mci);
+	return -ENODEV;
+}
+
+/*
+ *	i5400_init_one	constructor for one instance of device
+ *
+ * 	returns:
+ *		negative on error
+ *		count (>= 0)
+ */
+static int __devinit i5400_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *id)
+{
+	int rc;
+
+	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+	/* wake up device */
+	rc = pci_enable_device(pdev);
+	if (rc == -EIO)
+		return rc;
+
+	/* now probe and enable the device */
+	return i5400_probe1(pdev, id->driver_data);
+}
+
+/*
+ *	i5400_remove_one	destructor for one instance of device
+ *
+ */
+static void __devexit i5400_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0(__FILE__ ": %s()\n", __func__);
+
+	if (i5400_pci)
+		edac_pci_release_generic_ctl(i5400_pci);
+
+	mci = edac_mc_del_mc(&pdev->dev);
+	if (!mci)
+		return;
+
+	/* retrieve references to resources, and free those resources */
+	i5400_put_devices(mci);
+
+	edac_mc_free(mci);
+}
+
+/*
+ *	pci_device_id	table for which devices we are looking for
+ *
+ *	The "E500P" device is the first device supported.
+ */
+static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
+	{0,}			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i5400_pci_tbl);
+
+/*
+ *	i5400_driver	pci_driver structure for this module
+ *
+ */
+static struct pci_driver i5400_driver = {
+	.name = "i5400_edac",
+	.probe = i5400_init_one,
+	.remove = __devexit_p(i5400_remove_one),
+	.id_table = i5400_pci_tbl,
+};
+
+/*
+ *	i5400_init		Module entry function
+ *			Try to initialize this module for its devices
+ */
+static int __init i5400_init(void)
+{
+	int pci_rc;
+
+	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	pci_rc = pci_register_driver(&i5400_driver);
+
+	return (pci_rc < 0) ? pci_rc : 0;
+}
+
+/*
+ *	i5400_exit()	Module exit function
+ *			Unregister the driver
+ */
+static void __exit i5400_exit(void)
+{
+	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+	pci_unregister_driver(&i5400_driver);
+}
+
+module_init(i5400_init);
+module_exit(i5400_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Woodard <woodard@redhat.com>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - "
+		   I5400_REVISION);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
new file mode 100644
index 0000000..577760a
--- /dev/null
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -0,0 +1,468 @@
+/*
+ * Intel 82443BX/GX (440BX/GX chipset) Memory Controller EDAC kernel
+ * module (C) 2006 Tim Small
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License.
+ *
+ * Written by Tim Small <tim@buttersideup.com>, based on work by Linux
+ * Networx, Thayne Harbaugh, Dan Hollis <goemon at anime dot net> and
+ * others.
+ *
+ * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
+ *
+ * Written with reference to 82443BX Host Bridge Datasheet:
+ * http://www.intel.com/design/chipsets/440/documentation.htm
+ * references to this document given in [].
+ *
+ * This module doesn't support the 440LX, but it may be possible to
+ * make it do so (the 440LX's register definitions are different, but
+ * not completely so - I haven't studied them in enough detail to know
+ * how easy this would be).
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define I82443_REVISION	"0.1"
+
+#define EDAC_MOD_STR    "i82443bxgx_edac"
+
+/* The 82443BX supports SDRAM, or EDO (EDO for mobile only), "Memory
+ * Size: 8 MB to 512 MB (1GB with Registered DIMMs) with eight memory
+ * rows" "The 82443BX supports multiple-bit error detection and
+ * single-bit error correction when ECC mode is enabled and
+ * single/multi-bit error detection when correction is disabled.
+ * During writes to the DRAM, the 82443BX generates ECC for the data
+ * on a QWord basis. Partial QWord writes require a read-modify-write
+ * cycle when ECC is enabled."
+*/
+
+/* "Additionally, the 82443BX ensures that the data is corrected in
+ * main memory so that accumulation of errors is prevented. Another
+ * error within the same QWord would result in a double-bit error
+ * which is unrecoverable. This is known as hardware scrubbing since
+ * it requires no software intervention to correct the data in memory."
+ */
+
+/* [Also see page 100 (section 4.3), "DRAM Interface"]
+ * [Also see page 112 (section 4.6.1.4), ECC]
+ */
+
+#define I82443BXGX_NR_CSROWS 8
+#define I82443BXGX_NR_CHANS  1
+#define I82443BXGX_NR_DIMMS  4
+
+/* 82443 PCI Device 0 */
+#define I82443BXGX_NBXCFG 0x50	/* 32bit register starting at this PCI
+				 * config space offset */
+#define I82443BXGX_NBXCFG_OFFSET_NON_ECCROW 24	/* Array of bits, zero if
+						 * row is non-ECC */
+#define I82443BXGX_NBXCFG_OFFSET_DRAM_FREQ 12	/* 2 bits,00=100MHz,10=66 MHz */
+
+#define I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY 7	/* 2 bits:       */
+#define I82443BXGX_NBXCFG_INTEGRITY_NONE   0x0	/* 00 = Non-ECC */
+#define I82443BXGX_NBXCFG_INTEGRITY_EC     0x1	/* 01 = EC (only) */
+#define I82443BXGX_NBXCFG_INTEGRITY_ECC    0x2	/* 10 = ECC */
+#define I82443BXGX_NBXCFG_INTEGRITY_SCRUB  0x3	/* 11 = ECC + HW Scrub */
+
+#define I82443BXGX_NBXCFG_OFFSET_ECC_DIAG_ENABLE  6
+
+/* 82443 PCI Device 0 */
+#define I82443BXGX_EAP   0x80	/* 32bit register starting at this PCI
+				 * config space offset, Error Address
+				 * Pointer Register */
+#define I82443BXGX_EAP_OFFSET_EAP  12	/* High 20 bits of error address */
+#define I82443BXGX_EAP_OFFSET_MBE  BIT(1)	/* Err at EAP was multi-bit (W1TC) */
+#define I82443BXGX_EAP_OFFSET_SBE  BIT(0)	/* Err at EAP was single-bit (W1TC) */
+
+#define I82443BXGX_ERRCMD  0x90	/* 8bit register starting at this PCI
+				 * config space offset. */
+#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_MBE BIT(1)	/* 1 = enable */
+#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_SBE BIT(0)	/* 1 = enable */
+
+#define I82443BXGX_ERRSTS  0x91	/* 16bit register starting at this PCI
+				 * config space offset. */
+#define I82443BXGX_ERRSTS_OFFSET_MBFRE 5	/* 3 bits - first err row multibit */
+#define I82443BXGX_ERRSTS_OFFSET_MEF   BIT(4)	/* 1 = MBE occurred */
+#define I82443BXGX_ERRSTS_OFFSET_SBFRE 1	/* 3 bits - first err row singlebit */
+#define I82443BXGX_ERRSTS_OFFSET_SEF   BIT(0)	/* 1 = SBE occurred */
+
+#define I82443BXGX_DRAMC 0x57	/* 8bit register starting at this PCI
+				 * config space offset. */
+#define I82443BXGX_DRAMC_OFFSET_DT 3	/* 2 bits, DRAM Type */
+#define I82443BXGX_DRAMC_DRAM_IS_EDO 0	/* 00 = EDO */
+#define I82443BXGX_DRAMC_DRAM_IS_SDRAM 1	/* 01 = SDRAM */
+#define I82443BXGX_DRAMC_DRAM_IS_RSDRAM 2	/* 10 = Registered SDRAM */
+
+#define I82443BXGX_DRB 0x60	/* 8x 8bit registers starting at this PCI
+				 * config space offset. */
+
+/* FIXME - don't poll when ECC disabled? */
+
+struct i82443bxgx_edacmc_error_info {
+	u32 eap;
+};
+
+static struct edac_pci_ctl_info *i82443bxgx_pci;
+
+static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code has
+					 * already registered a driver
+					 */
+
+static int i82443bxgx_registered = 1;
+
+static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
+				struct i82443bxgx_edacmc_error_info
+				*info)
+{
+	struct pci_dev *pdev;
+	pdev = to_pci_dev(mci->dev);
+	pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap);
+	if (info->eap & I82443BXGX_EAP_OFFSET_SBE)
+		/* Clear error to allow next error to be reported [p.61] */
+		pci_write_bits32(pdev, I82443BXGX_EAP,
+				 I82443BXGX_EAP_OFFSET_SBE,
+				 I82443BXGX_EAP_OFFSET_SBE);
+
+	if (info->eap & I82443BXGX_EAP_OFFSET_MBE)
+		/* Clear error to allow next error to be reported [p.61] */
+		pci_write_bits32(pdev, I82443BXGX_EAP,
+				 I82443BXGX_EAP_OFFSET_MBE,
+				 I82443BXGX_EAP_OFFSET_MBE);
+}
+
+static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
+						struct
+						i82443bxgx_edacmc_error_info
+						*info, int handle_errors)
+{
+	int error_found = 0;
+	u32 eapaddr, page, pageoffset;
+
+	/* bits 30:12 hold the 4kb block in which the error occurred
+	 * [p.61] */
+	eapaddr = (info->eap & 0xfffff000);
+	page = eapaddr >> PAGE_SHIFT;
+	pageoffset = eapaddr - (page << PAGE_SHIFT);
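+	/* note: with PAGE_SHIFT == 12 the masked low bits make pageoffset 0 */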
+
+	if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
+		error_found = 1;
+		if (handle_errors)
+			edac_mc_handle_ce(mci, page, pageoffset,
+				/* 440BX/GX don't make syndrome information
+				 * available */
+				0, edac_mc_find_csrow_by_page(mci, page), 0,
+				mci->ctl_name);
+	}
+
+	if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
+		error_found = 1;
+		if (handle_errors)
+			edac_mc_handle_ue(mci, page, pageoffset,
+					edac_mc_find_csrow_by_page(mci, page),
+					mci->ctl_name);
+	}
+
+	return error_found;
+}
+
+static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
+{
+	struct i82443bxgx_edacmc_error_info info;
+
+	debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+	i82443bxgx_edacmc_get_error_info(mci, &info);
+	i82443bxgx_edacmc_process_error_info(mci, &info, 1);
+}
+
+static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
+				struct pci_dev *pdev,
+				enum edac_type edac_mode,
+				enum mem_type mtype)
+{
+	struct csrow_info *csrow;
+	int index;
+	u8 drbar, dramc;
+	u32 row_base, row_high_limit, row_high_limit_last;
+
+	pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
+	row_high_limit_last = 0;
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+		pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
+		debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n",
+			mci->mc_idx, __func__, index, drbar);
+		row_high_limit = ((u32) drbar << 23);
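+		/* DRB granularity is 8 MB; e.g. a DRB value of 0x20
+		 * marks a 256 MB boundary */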
+		/* find the DRAM Chip Select Base address and mask */
+		debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
+			"Boundry Address=%#0x, Last = %#0x \n",
+			mci->mc_idx, __func__, index, row_high_limit,
+			row_high_limit_last);
+
+		/* 440GX goes to 2GB, represented with a DRB of 0. */
+		if (row_high_limit_last && !row_high_limit)
+			row_high_limit = 1UL << 31;
+
+		/* This row is empty [p.49] */
+		if (row_high_limit == row_high_limit_last)
+			continue;
+		row_base = row_high_limit_last;
+		csrow->first_page = row_base >> PAGE_SHIFT;
+		csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
+		csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+		/* EAP reports in 4kilobyte granularity [61] */
+		csrow->grain = 1 << 12;
+		csrow->mtype = mtype;
+		/* I don't think 440BX can tell you device type? FIXME? */
+		csrow->dtype = DEV_UNKNOWN;
+		/* Mode is global to all rows on 440BX */
+		csrow->edac_mode = edac_mode;
+		row_high_limit_last = row_high_limit;
+	}
+}
+
+static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	struct mem_ctl_info *mci;
+	u8 dramc;
+	u32 nbxcfg, ecc_mode;
+	enum mem_type mtype;
+	enum edac_type edac_mode;
+
+	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+	/* Something is really hosed if PCI config space reads from
+	 * the MC aren't working.
+	 */
+	if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
+		return -EIO;
+
+	mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0);
+
+	if (mci == NULL)
+		return -ENOMEM;
+
+	debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+	pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
+	switch ((dramc >> I82443BXGX_DRAMC_OFFSET_DT) & (BIT(0) | BIT(1))) {
+	case I82443BXGX_DRAMC_DRAM_IS_EDO:
+		mtype = MEM_EDO;
+		break;
+	case I82443BXGX_DRAMC_DRAM_IS_SDRAM:
+		mtype = MEM_SDR;
+		break;
+	case I82443BXGX_DRAMC_DRAM_IS_RSDRAM:
+		mtype = MEM_RDR;
+		break;
+	default:
+		debugf0("Unknown/reserved DRAM type value "
+			"in DRAMC register!\n");
+		mtype = MEM_UNKNOWN;
+	}
+
+	if ((mtype == MEM_SDR) || (mtype == MEM_RDR))
+		mci->edac_cap = mci->edac_ctl_cap;
+	else
+		mci->edac_cap = EDAC_FLAG_NONE;
+
+	mci->scrub_cap = SCRUB_FLAG_HW_SRC;
+	pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg);
+	ecc_mode = ((nbxcfg >> I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY) &
+		(BIT(0) | BIT(1)));
+
+	mci->scrub_mode = (ecc_mode == I82443BXGX_NBXCFG_INTEGRITY_SCRUB)
+		? SCRUB_HW_SRC : SCRUB_NONE;
+
+	switch (ecc_mode) {
+	case I82443BXGX_NBXCFG_INTEGRITY_NONE:
+		edac_mode = EDAC_NONE;
+		break;
+	case I82443BXGX_NBXCFG_INTEGRITY_EC:
+		edac_mode = EDAC_EC;
+		break;
+	case I82443BXGX_NBXCFG_INTEGRITY_ECC:
+	case I82443BXGX_NBXCFG_INTEGRITY_SCRUB:
+		edac_mode = EDAC_SECDED;
+		break;
+	default:
+		debugf0("%s(): Unknown/reserved ECC state "
+			"in NBXCFG register!\n", __func__);
+		edac_mode = EDAC_UNKNOWN;
+		break;
+	}
+
+	i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype);
+
+	/* Many BIOSes don't clear error flags on boot, so do this
+	 * here, or we get "phantom" errors occuring at module-load
+	 * time. */
+	pci_write_bits32(pdev, I82443BXGX_EAP,
+			(I82443BXGX_EAP_OFFSET_SBE |
+				I82443BXGX_EAP_OFFSET_MBE),
+			(I82443BXGX_EAP_OFFSET_SBE |
+				I82443BXGX_EAP_OFFSET_MBE));
+
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = I82443_REVISION;
+	mci->ctl_name = "I82443BXGX";
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = i82443bxgx_edacmc_check;
+	mci->ctl_page_to_phys = NULL;
+
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail;
+	}
+
+	/* allocating generic PCI control info */
+	i82443bxgx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!i82443bxgx_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+	return 0;
+
+fail:
+	edac_mc_free(mci);
+	return -ENODEV;
+}
+
+EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1);
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
+						const struct pci_device_id *ent)
+{
+	int rc;
+
+	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+	/* don't need to call pci_enable_device() */
+	rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
+
+	if (mci_pdev == NULL)
+		mci_pdev = pci_dev_get(pdev);
+
+	return rc;
+}
+
+static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0(__FILE__ ": %s()\n", __func__);
+
+	if (i82443bxgx_pci)
+		edac_pci_release_generic_ctl(i82443bxgx_pci);
+
+	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+		return;
+
+	edac_mc_free(mci);
+}
+
+EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
+
+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2)},
+	{0,}			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl);
+
+static struct pci_driver i82443bxgx_edacmc_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = i82443bxgx_edacmc_init_one,
+	.remove = __devexit_p(i82443bxgx_edacmc_remove_one),
+	.id_table = i82443bxgx_pci_tbl,
+};
+
+static int __init i82443bxgx_edacmc_init(void)
+{
+	int pci_rc;
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	pci_rc = pci_register_driver(&i82443bxgx_edacmc_driver);
+	if (pci_rc < 0)
+		goto fail0;
+
+	if (mci_pdev == NULL) {
+		const struct pci_device_id *id = &i82443bxgx_pci_tbl[0];
+		int i = 0;
+		i82443bxgx_registered = 0;
+
+		while (mci_pdev == NULL && id->vendor != 0) {
+			mci_pdev = pci_get_device(id->vendor,
+					id->device, NULL);
+			i++;
+			id = &i82443bxgx_pci_tbl[i];
+		}
+		if (!mci_pdev) {
+			debugf0("i82443bxgx pci_get_device fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+
+		pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl);
+
+		if (pci_rc < 0) {
+			debugf0("i82443bxgx init fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+	}
+
+	return 0;
+
+fail1:
+	pci_unregister_driver(&i82443bxgx_edacmc_driver);
+
+fail0:
+	if (mci_pdev != NULL)
+		pci_dev_put(mci_pdev);
+
+	return pci_rc;
+}
+
+static void __exit i82443bxgx_edacmc_exit(void)
+{
+	pci_unregister_driver(&i82443bxgx_edacmc_driver);
+
+	if (!i82443bxgx_registered)
+		i82443bxgx_edacmc_remove_one(mci_pdev);
+
+	if (mci_pdev)
+		pci_dev_put(mci_pdev);
+}
+
+module_init(i82443bxgx_edacmc_init);
+module_exit(i82443bxgx_edacmc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD");
+MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
new file mode 100644
index 0000000..c0088ba
--- /dev/null
+++ b/drivers/edac/i82860_edac.c
@@ -0,0 +1,354 @@
+/*
+ * Intel 82860 Memory Controller kernel module
+ * (C) 2005 Red Hat (http://www.redhat.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Ben Woodard <woodard@redhat.com>
+ * shamelessly copied from and based upon the edac_i82875 driver
+ * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define  I82860_REVISION " Ver: 2.0.2 " __DATE__
+#define EDAC_MOD_STR	"i82860_edac"
+
+#define i82860_printk(level, fmt, arg...) \
+	edac_printk(level, "i82860", fmt, ##arg)
+
+#define i82860_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_82860_0
+#define PCI_DEVICE_ID_INTEL_82860_0	0x2531
+#endif				/* PCI_DEVICE_ID_INTEL_82860_0 */
+
+#define I82860_MCHCFG 0x50
+#define I82860_GBA 0x60
+#define I82860_GBA_MASK 0x7FF
+#define I82860_GBA_SHIFT 24
+#define I82860_ERRSTS 0xC8
+#define I82860_EAP 0xE4
+#define I82860_DERRCTL_STS 0xE2
+
+enum i82860_chips {
+	I82860 = 0,
+};
+
+struct i82860_dev_info {
+	const char *ctl_name;
+};
+
+struct i82860_error_info {
+	u16 errsts;
+	u32 eap;
+	u16 derrsyn;
+	u16 errsts2;
+};
+
+static const struct i82860_dev_info i82860_devs[] = {
+	[I82860] = {
+		.ctl_name = "i82860"},
+};
+
+static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code
+					 * has already registered a driver
+					 */
+static struct edac_pci_ctl_info *i82860_pci;
+
+static void i82860_get_error_info(struct mem_ctl_info *mci,
+				struct i82860_error_info *info)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(mci->dev);
+
+	/*
+	 * This is a mess because there is no atomic way to read all the
+	 * registers at once and the registers can transition from CE being
+	 * overwritten by UE.
+	 */
+	pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts);
+	pci_read_config_dword(pdev, I82860_EAP, &info->eap);
+	pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
+	pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts2);
+
+	pci_write_bits16(pdev, I82860_ERRSTS, 0x0003, 0x0003);
+
+	/*
+	 * If the error is the same for both reads then the first set of reads
+	 * is valid.  If there is a change then there was a CE with no info,
+	 * and the second set of reads is valid and should be the UE info.
+	 */
+	if (!(info->errsts2 & 0x0003))
+		return;
+
+	if ((info->errsts ^ info->errsts2) & 0x0003) {
+		pci_read_config_dword(pdev, I82860_EAP, &info->eap);
+		pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
+	}
+}
+
+static int i82860_process_error_info(struct mem_ctl_info *mci,
+				struct i82860_error_info *info,
+				int handle_errors)
+{
+	int row;
+
+	if (!(info->errsts2 & 0x0003))
+		return 0;
+
+	if (!handle_errors)
+		return 1;
+
+	if ((info->errsts ^ info->errsts2) & 0x0003) {
+		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		info->errsts = info->errsts2;
+	}
+
+	info->eap >>= PAGE_SHIFT;
+	row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+	if (info->errsts & 0x0002)
+		edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
+	else
+		edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
+				"i82860 CE");
+
+	return 1;
+}
+
+static void i82860_check(struct mem_ctl_info *mci)
+{
+	struct i82860_error_info info;
+
+	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+	i82860_get_error_info(mci, &info);
+	i82860_process_error_info(mci, &info, 1);
+}
+
+static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
+{
+	unsigned long last_cumul_size;
+	u16 mchcfg_ddim;	/* DRAM Data Integrity Mode 0=none, 2=edac */
+	u16 value;
+	u32 cumul_size;
+	struct csrow_info *csrow;
+	int index;
+
+	pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
+	mchcfg_ddim = mchcfg_ddim & 0x180;
+	last_cumul_size = 0;
+
+	/* The group row boundary (GBA) reg values are boundary addresses
+	 * for each DRAM row, with a granularity of 16MB.  GBA regs are
+	 * cumulative; therefore GBA15 will contain the total memory
+	 * contained in all sixteen rows.
+	 */
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+		pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
+		cumul_size = (value & I82860_GBA_MASK) <<
+			(I82860_GBA_SHIFT - PAGE_SHIFT);
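+		/* e.g. a GBA value of 0x008 => 8 x 16 MiB = 128 MiB,
+		 * i.e. 32768 pages of 4 KiB */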
+		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+			cumul_size);
+
+		if (cumul_size == last_cumul_size)
+			continue;	/* not populated */
+
+		csrow->first_page = last_cumul_size;
+		csrow->last_page = cumul_size - 1;
+		csrow->nr_pages = cumul_size - last_cumul_size;
+		last_cumul_size = cumul_size;
+		csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB resolution */
+		csrow->mtype = MEM_RMBS;
+		csrow->dtype = DEV_UNKNOWN;
+		csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+	}
+}
+
+static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	struct mem_ctl_info *mci;
+	struct i82860_error_info discard;
+
+	/* RDRAM has channels, but these don't map onto the abstractions
+	   that edac uses.
+	   The device groups from the GBA registers seem to map reasonably
+	   well onto the notion of a chip select row.
+	   There are 16 GBA registers, and since the name is associated with
+	   the channel and the GBA registers map to physical devices, we
+	   make one channel per group.
+	 */
+	mci = edac_mc_alloc(0, 16, 1, 0);
+
+	if (!mci)
+		return -ENOMEM;
+
+	debugf3("%s(): init mci\n", __func__);
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	/* I"m not sure about this but I think that all RDRAM is SECDED */
+	mci->edac_cap = EDAC_FLAG_SECDED;
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = I82860_REVISION;
+	mci->ctl_name = i82860_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = i82860_check;
+	mci->ctl_page_to_phys = NULL;
+	i82860_init_csrows(mci, pdev);
+	i82860_get_error_info(mci, &discard);	/* clear counters */
+
+	/* Here we assume that we will never see multiple instances of this
+	 * type of memory controller.  The ID is therefore hardcoded to 0.
+	 */
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail;
+	}
+
+	/* allocating generic PCI control info */
+	i82860_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!i82860_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+
+	return 0;
+
+fail:
+	edac_mc_free(mci);
+	return -ENODEV;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82860_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	int rc;
+
+	debugf0("%s()\n", __func__);
+	i82860_printk(KERN_INFO, "i82860 init one\n");
+
+	if (pci_enable_device(pdev) < 0)
+		return -EIO;
+
+	rc = i82860_probe1(pdev, ent->driver_data);
+
+	if (rc == 0)
+		mci_pdev = pci_dev_get(pdev);
+
+	return rc;
+}
+
+static void __devexit i82860_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0("%s()\n", __func__);
+
+	if (i82860_pci)
+		edac_pci_release_generic_ctl(i82860_pci);
+
+	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+		return;
+
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
+	{
+	 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 I82860},
+	{
+	 0,
+	 }			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
+
+static struct pci_driver i82860_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = i82860_init_one,
+	.remove = __devexit_p(i82860_remove_one),
+	.id_table = i82860_pci_tbl,
+};
+
+static int __init i82860_init(void)
+{
+	int pci_rc;
+
+	debugf3("%s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
+		goto fail0;
+
+	if (!mci_pdev) {
+		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+					PCI_DEVICE_ID_INTEL_82860_0, NULL);
+
+		if (mci_pdev == NULL) {
+			debugf0("860 pci_get_device fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+
+		pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
+
+		if (pci_rc < 0) {
+			debugf0("860 init fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+	}
+
+	return 0;
+
+fail1:
+	pci_unregister_driver(&i82860_driver);
+
+fail0:
+	if (mci_pdev != NULL)
+		pci_dev_put(mci_pdev);
+
+	return pci_rc;
+}
+
+static void __exit i82860_exit(void)
+{
+	debugf3("%s()\n", __func__);
+
+	pci_unregister_driver(&i82860_driver);
+
+	if (mci_pdev != NULL)
+		pci_dev_put(mci_pdev);
+}
+
+module_init(i82860_init);
+module_exit(i82860_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
+		"Ben Woodard <woodard@redhat.com>");
+MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
new file mode 100644
index 0000000..b2d83b9
--- /dev/null
+++ b/drivers/edac/i82875p_edac.c
@@ -0,0 +1,597 @@
+/*
+ * Intel D82875P Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Contributors:
+ *	Wang Zhenyu at intel.com
+ *
+ * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define I82875P_REVISION	" Ver: 2.0.2 " __DATE__
+#define EDAC_MOD_STR		"i82875p_edac"
+
+#define i82875p_printk(level, fmt, arg...) \
+	edac_printk(level, "i82875p", fmt, ##arg)
+
+#define i82875p_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_82875_0
+#define PCI_DEVICE_ID_INTEL_82875_0	0x2578
+#endif				/* PCI_DEVICE_ID_INTEL_82875_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_82875_6
+#define PCI_DEVICE_ID_INTEL_82875_6	0x257e
+#endif				/* PCI_DEVICE_ID_INTEL_82875_6 */
+
+/* four csrows in dual channel, eight in single channel */
+#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
+
+/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
+#define I82875P_EAP		0x58	/* Error Address Pointer (32b)
+					 *
+					 * 31:12 block address
+					 * 11:0  reserved
+					 */
+
+#define I82875P_DERRSYN		0x5c	/* DRAM Error Syndrome (8b)
+					 *
+					 *  7:0  DRAM ECC Syndrome
+					 */
+
+#define I82875P_DES		0x5d	/* DRAM Error Status (8b)
+					 *
+					 *  7:1  reserved
+					 *  0    Error channel 0/1
+					 */
+
+#define I82875P_ERRSTS		0xc8	/* Error Status Register (16b)
+					 *
+					 * 15:10 reserved
+					 *  9    non-DRAM lock error (ndlock)
+					 *  8    Sftwr Generated SMI
+					 *  7    ECC UE
+					 *  6    reserved
+					 *  5    MCH detects unimplemented cycle
+					 *  4    AGP access outside GA
+					 *  3    Invalid AGP access
+					 *  2    Invalid GA translation table
+					 *  1    Unsupported AGP command
+					 *  0    ECC CE
+					 */
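+/* The error paths below test ERRSTS against the mask 0x0081, i.e.
+ * bit 7 (ECC UE) and bit 0 (ECC CE) from the layout above.
+ */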
+
+#define I82875P_ERRCMD		0xca	/* Error Command (16b)
+					 *
+					 * 15:10 reserved
+					 *  9    SERR on non-DRAM lock
+					 *  8    SERR on ECC UE
+					 *  7    SERR on ECC CE
+					 *  6    target abort on high exception
+					 *  5    detect unimplemented cyc
+					 *  4    AGP access outside of GA
+					 *  3    SERR on invalid AGP access
+					 *  2    invalid translation table
+					 *  1    SERR on unsupported AGP command
+					 *  0    reserved
+					 */
+
+/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
+#define I82875P_PCICMD6		0x04	/* PCI Command Register (16b)
+					 *
+					 * 15:10 reserved
+					 *  9    fast back-to-back - ro 0
+					 *  8    SERR enable - ro 0
+					 *  7    addr/data stepping - ro 0
+					 *  6    parity err enable - ro 0
+					 *  5    VGA palette snoop - ro 0
+					 *  4    mem wr & invalidate - ro 0
+					 *  3    special cycle - ro 0
+					 *  2    bus master - ro 0
+					 *  1    mem access dev6 - 0(dis),1(en)
+					 *  0    IO access dev3 - 0(dis),1(en)
+					 */
+
+#define I82875P_BAR6		0x10	/* Mem Delays Base ADDR Reg (32b)
+					 *
+					 * 31:12 mem base addr [31:12]
+					 * 11:4  address mask - ro 0
+					 *  3    prefetchable - ro 0(non),1(pre)
+					 *  2:1  mem type - ro 0
+					 *  0    mem space - ro 0
+					 */
+
+/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */
+
+#define I82875P_DRB_SHIFT 26	/* 64MiB grain */
+#define I82875P_DRB		0x00	/* DRAM Row Boundary (8b x 8)
+					 *
+					 *  7    reserved
+					 *  6:0  64MiB row boundary addr
+					 */
+
+#define I82875P_DRA		0x10	/* DRAM Row Attribute (4b x 8)
+					 *
+					 *  7    reserved
+					 *  6:4  row attr row 1
+					 *  3    reserved
+					 *  2:0  row attr row 0
+					 *
+					 * 000 =  4KiB
+					 * 001 =  8KiB
+					 * 010 = 16KiB
+					 * 011 = 32KiB
+					 */
+
+#define I82875P_DRC		0x68	/* DRAM Controller Mode (32b)
+					 *
+					 * 31:30 reserved
+					 * 29    init complete
+					 * 28:23 reserved
+					 * 22:21 nr chan 00=1,01=2
+					 * 20    reserved
+					 * 19:18 Data Integ Mode 00=none,01=ecc
+					 * 17:11 reserved
+					 * 10:8  refresh mode
+					 *  7    reserved
+					 *  6:4  mode select
+					 *  3:2  reserved
+					 *  1:0  DRAM type 01=DDR
+					 */
+
+enum i82875p_chips {
+	I82875P = 0,
+};
+
+struct i82875p_pvt {
+	struct pci_dev *ovrfl_pdev;
+	void __iomem *ovrfl_window;
+};
+
+struct i82875p_dev_info {
+	const char *ctl_name;
+};
+
+struct i82875p_error_info {
+	u16 errsts;
+	u32 eap;
+	u8 des;
+	u8 derrsyn;
+	u16 errsts2;
+};
+
+static const struct i82875p_dev_info i82875p_devs[] = {
+	[I82875P] = {
+		.ctl_name = "i82875p"},
+};
+
+static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code
+					 * has already registered a driver
+					 */
+
+static struct edac_pci_ctl_info *i82875p_pci;
+
+static void i82875p_get_error_info(struct mem_ctl_info *mci,
+				struct i82875p_error_info *info)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(mci->dev);
+
+	/*
+	 * This is a mess because there is no atomic way to read all the
+	 * registers at once and the registers can transition from CE being
+	 * overwritten by UE.
+	 */
+	pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts);
+
+	if (!(info->errsts & 0x0081))
+		return;
+
+	pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
+	pci_read_config_byte(pdev, I82875P_DES, &info->des);
+	pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
+	pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2);
+
+	/*
+	 * If the error is the same for both reads then the first set
+	 * of reads is valid.  If there is a change then there is a CE
+	 * with no info and the second set of reads is valid and should
+	 * be the UE info.
+	 */
+	if ((info->errsts ^ info->errsts2) & 0x0081) {
+		pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
+		pci_read_config_byte(pdev, I82875P_DES, &info->des);
+		pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
+	}
+
+	pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
+}
+
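+/* Worked example of the scheme above, using hypothetical register values:
+ * if the first ERRSTS read returns 0x0001 (CE) and the second 0x0080 (UE),
+ * then (errsts ^ errsts2) & 0x0081 is non-zero -- a UE overwrote the CE
+ * between the reads.  The CE is reported without address information and
+ * the second set of register reads is treated as the UE data.
+ */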
+static int i82875p_process_error_info(struct mem_ctl_info *mci,
+				struct i82875p_error_info *info,
+				int handle_errors)
+{
+	int row, multi_chan;
+
+	multi_chan = mci->csrows[0].nr_channels - 1;
+
+	if (!(info->errsts & 0x0081))
+		return 0;
+
+	if (!handle_errors)
+		return 1;
+
+	if ((info->errsts ^ info->errsts2) & 0x0081) {
+		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		info->errsts = info->errsts2;
+	}
+
+	info->eap >>= PAGE_SHIFT;
+	row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+	if (info->errsts & 0x0080)
+		edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
+	else
+		edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+				multi_chan ? (info->des & 0x1) : 0,
+				"i82875p CE");
+
+	return 1;
+}
+
+static void i82875p_check(struct mem_ctl_info *mci)
+{
+	struct i82875p_error_info info;
+
+	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+	i82875p_get_error_info(mci, &info);
+	i82875p_process_error_info(mci, &info, 1);
+}
+
+/* Return 0 on success or 1 on failure. */
+static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
+				struct pci_dev **ovrfl_pdev,
+				void __iomem **ovrfl_window)
+{
+	struct pci_dev *dev;
+	void __iomem *window;
+	int err;
+
+	*ovrfl_pdev = NULL;
+	*ovrfl_window = NULL;
+	dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
+
+	if (dev == NULL) {
+		/* Intel tells BIOS developers to hide device 6 which
+		 * configures the overflow device access containing
+		 * the DRBs - this is where we expose device 6.
+		 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+		 */
+		pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
+		dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
+
+		if (dev == NULL)
+			return 1;
+
+		err = pci_bus_add_device(dev);
+		if (err) {
+			i82875p_printk(KERN_ERR,
+				"%s(): pci_bus_add_device() Failed\n",
+				__func__);
+		}
+		pci_bus_assign_resources(dev->bus);
+	}
+
+	*ovrfl_pdev = dev;
+
+	if (pci_enable_device(dev)) {
+		i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
+			"device\n", __func__);
+		return 1;
+	}
+
+	if (pci_request_regions(dev, pci_name(dev))) {
+#ifdef CORRECT_BIOS
+		goto fail0;
+#endif
+	}
+
+	/* cache is irrelevant for PCI bus reads/writes */
+	window = pci_ioremap_bar(dev, 0);
+	if (window == NULL) {
+		i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
+			__func__);
+		goto fail1;
+	}
+
+	*ovrfl_window = window;
+	return 0;
+
+fail1:
+	pci_release_regions(dev);
+
+#ifdef CORRECT_BIOS
+fail0:
+	pci_disable_device(dev);
+#endif
+	/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
+	return 1;
+}
+
+/* Return 1 if dual channel mode is active.  Else return 0. */
+static inline int dual_channel_active(u32 drc)
+{
+	return (drc >> 21) & 0x1;
+}
+
+static void i82875p_init_csrows(struct mem_ctl_info *mci,
+				struct pci_dev *pdev,
+				void __iomem * ovrfl_window, u32 drc)
+{
+	struct csrow_info *csrow;
+	unsigned long last_cumul_size;
+	u8 value;
+	u32 drc_ddim;		/* DRAM Data Integrity Mode 0=none,2=edac */
+	u32 cumul_size;
+	int index;
+
+	drc_ddim = (drc >> 18) & 0x1;
+	last_cumul_size = 0;
+
+	/* The dram row boundary (DRB) reg values are boundary address
+	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
+	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
+	 * contain the total memory contained in all eight rows.
+	 */
+
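+	/* Illustrative decode, assuming 4KiB pages (PAGE_SHIFT == 12):
+	 * DRB0 = 0x04 gives cumul_size = 4 << (26 - 12) = 0x10000 pages,
+	 * i.e. row 0 spans pages 0x0-0xffff (256MiB at 64MiB grain).  If
+	 * DRB1 also reads 0x04, row 1 is unpopulated and is skipped.
+	 */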
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+
+		value = readb(ovrfl_window + I82875P_DRB + index);
+		cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
+		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+			cumul_size);
+		if (cumul_size == last_cumul_size)
+			continue;	/* not populated */
+
+		csrow->first_page = last_cumul_size;
+		csrow->last_page = cumul_size - 1;
+		csrow->nr_pages = cumul_size - last_cumul_size;
+		last_cumul_size = cumul_size;
+		csrow->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
+		csrow->mtype = MEM_DDR;
+		csrow->dtype = DEV_UNKNOWN;
+		csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+	}
+}
+
+static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	int rc = -ENODEV;
+	struct mem_ctl_info *mci;
+	struct i82875p_pvt *pvt;
+	struct pci_dev *ovrfl_pdev;
+	void __iomem *ovrfl_window;
+	u32 drc;
+	u32 nr_chans;
+	struct i82875p_error_info discard;
+
+	debugf0("%s()\n", __func__);
+
+	ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
+
+	if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
+		return -ENODEV;
+	drc = readl(ovrfl_window + I82875P_DRC);
+	nr_chans = dual_channel_active(drc) + 1;
+	mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
+			nr_chans, 0);
+
+	if (!mci) {
+		rc = -ENOMEM;
+		goto fail0;
+	}
+
+	/* Keeps mci available after edac_mc_del_mc() till edac_mc_free() */
+	kobject_get(&mci->edac_mci_kobj);
+
+	debugf3("%s(): init mci\n", __func__);
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_UNKNOWN;
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = I82875P_REVISION;
+	mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = i82875p_check;
+	mci->ctl_page_to_phys = NULL;
+	debugf3("%s(): init pvt\n", __func__);
+	pvt = (struct i82875p_pvt *)mci->pvt_info;
+	pvt->ovrfl_pdev = ovrfl_pdev;
+	pvt->ovrfl_window = ovrfl_window;
+	i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
+	i82875p_get_error_info(mci, &discard);	/* clear counters */
+
+	/* Here we assume that we will never see multiple instances of this
+	 * type of memory controller.  The ID is therefore hardcoded to 0.
+	 */
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail1;
+	}
+
+	/* allocating generic PCI control info */
+	i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!i82875p_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+	return 0;
+
+fail1:
+	kobject_put(&mci->edac_mci_kobj);
+	edac_mc_free(mci);
+
+fail0:
+	iounmap(ovrfl_window);
+	pci_release_regions(ovrfl_pdev);
+
+	pci_disable_device(ovrfl_pdev);
+	/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
+	return rc;
+}
+
+/* returns 0 on success, or negative on error */
+static int __devinit i82875p_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	int rc;
+
+	debugf0("%s()\n", __func__);
+	i82875p_printk(KERN_INFO, "i82875p init one\n");
+
+	if (pci_enable_device(pdev) < 0)
+		return -EIO;
+
+	rc = i82875p_probe1(pdev, ent->driver_data);
+
+	if (mci_pdev == NULL)
+		mci_pdev = pci_dev_get(pdev);
+
+	return rc;
+}
+
+static void __devexit i82875p_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+	struct i82875p_pvt *pvt = NULL;
+
+	debugf0("%s()\n", __func__);
+
+	if (i82875p_pci)
+		edac_pci_release_generic_ctl(i82875p_pci);
+
+	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+		return;
+
+	pvt = (struct i82875p_pvt *)mci->pvt_info;
+
+	if (pvt->ovrfl_window)
+		iounmap(pvt->ovrfl_window);
+
+	if (pvt->ovrfl_pdev) {
+#ifdef CORRECT_BIOS
+		pci_release_regions(pvt->ovrfl_pdev);
+#endif				/*CORRECT_BIOS */
+		pci_disable_device(pvt->ovrfl_pdev);
+		pci_dev_put(pvt->ovrfl_pdev);
+	}
+
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
+	{
+	 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 I82875P},
+	{
+	 0,
+	 }			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
+
+static struct pci_driver i82875p_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = i82875p_init_one,
+	.remove = __devexit_p(i82875p_remove_one),
+	.id_table = i82875p_pci_tbl,
+};
+
+static int __init i82875p_init(void)
+{
+	int pci_rc;
+
+	debugf3("%s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	pci_rc = pci_register_driver(&i82875p_driver);
+
+	if (pci_rc < 0)
+		goto fail0;
+
+	if (mci_pdev == NULL) {
+		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+					PCI_DEVICE_ID_INTEL_82875_0, NULL);
+
+		if (!mci_pdev) {
+			debugf0("875p pci_get_device fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+
+		pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
+
+		if (pci_rc < 0) {
+			debugf0("875p init fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+	}
+
+	return 0;
+
+fail1:
+	pci_unregister_driver(&i82875p_driver);
+
+fail0:
+	if (mci_pdev != NULL)
+		pci_dev_put(mci_pdev);
+
+	return pci_rc;
+}
+
+static void __exit i82875p_exit(void)
+{
+	debugf3("%s()\n", __func__);
+
+	i82875p_remove_one(mci_pdev);
+	pci_dev_put(mci_pdev);
+
+	pci_unregister_driver(&i82875p_driver);
+}
+
+module_init(i82875p_init);
+module_exit(i82875p_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
new file mode 100644
index 0000000..2eed3ea
--- /dev/null
+++ b/drivers/edac/i82975x_edac.c
@@ -0,0 +1,672 @@
+/*
+ * Intel 82975X Memory Controller kernel module
+ * (C) 2007 aCarLab (India) Pvt. Ltd. (http://acarlab.com)
+ * (C) 2007 jetzbroadband (http://jetzbroadband.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Arvind R.
+ *   Copied from i82875p_edac.c source:
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define I82975X_REVISION	" Ver: 1.0.0 " __DATE__
+#define EDAC_MOD_STR		"i82975x_edac"
+
+#define i82975x_printk(level, fmt, arg...) \
+	edac_printk(level, "i82975x", fmt, ##arg)
+
+#define i82975x_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "i82975x", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_82975_0
+#define PCI_DEVICE_ID_INTEL_82975_0	0x277c
+#endif				/* PCI_DEVICE_ID_INTEL_82975_0 */
+
+#define I82975X_NR_CSROWS(nr_chans)		(8/(nr_chans))
+
+/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
+#define I82975X_EAP		0x58	/* Dram Error Address Pointer (32b)
+					 *
+					 * 31:7  128 byte cache-line address
+					 * 6:1   reserved
+					 * 0     0: CH0; 1: CH1
+					 */
+
+#define I82975X_DERRSYN		0x5c	/* Dram Error SYNdrome (8b)
+					 *
+					 *  7:0  DRAM ECC Syndrome
+					 */
+
+#define I82975X_DES		0x5d	/* Dram ERRor DeSTination (8b)
+					 * 0h:    Processor Memory Reads
+					 * 1h:7h  reserved
+					 * More - See Page 65 of Intel DocSheet.
+					 */
+
+#define I82975X_ERRSTS		0xc8	/* Error Status Register (16b)
+					 *
+					 * 15:12 reserved
+					 * 11    Thermal Sensor Event
+					 * 10    reserved
+					 *  9    non-DRAM lock error (ndlock)
+					 *  8    Refresh Timeout
+					 *  7:2  reserved
+					 *  1    ECC UE (multibit DRAM error)
+					 *  0    ECC CE (singlebit DRAM error)
+					 */
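+/* The i82975x error paths below mask ERRSTS with 0x0003, i.e.
+ * bit 1 (ECC UE) and bit 0 (ECC CE) from the layout above.
+ */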
+
+/* Error reporting is supported by 3 mechanisms:
+ *  1. DMI SERR generation (ERRCMD)
+ *  2. SMI DMI  generation (SMICMD)
+ *  3. SCI DMI  generation (SCICMD)
+ * NOTE: only ONE of the three may be enabled at a time.
+ */
+#define I82975X_ERRCMD		0xca	/* Error Command (16b)
+					 *
+					 * 15:12 reserved
+					 * 11    Thermal Sensor Event
+					 * 10    reserved
+					 *  9    non-DRAM lock error (ndlock)
+					 *  8    Refresh Timeout
+					 *  7:2  reserved
+					 *  1    ECC UE (multibit DRAM error)
+					 *  0    ECC CE (singlebit DRAM error)
+					 */
+
+#define I82975X_SMICMD		0xcc	/* Error Command (16b)
+					 *
+					 * 15:2  reserved
+					 *  1    ECC UE (multibit DRAM error)
+					 *  0    ECC CE (singlebit DRAM error)
+					 */
+
+#define I82975X_SCICMD		0xce	/* Error Command (16b)
+					 *
+					 * 15:2  reserved
+					 *  1    ECC UE (multibit DRAM error)
+					 *  0    ECC CE (singlebit DRAM error)
+					 */
+
+#define I82975X_XEAP	0xfc	/* Extended Dram Error Address Pointer (8b)
+					 *
+					 * 7:1   reserved
+					 * 0     Bit32 of the Dram Error Address
+					 */
+
+#define I82975X_MCHBAR		0x44	/*
+					 *
+					 * 31:14 Base Addr of 16K memory-mapped
+					 *	configuration space
+					 * 13:1  reserved
+					 *  0    mem-mapped config space enable
+					 */
+
+/* NOTE: Following addresses have to indexed using MCHBAR offset (44h, 32b) */
+/* Intel 82975x memory mapped register space */
+
+#define I82975X_DRB_SHIFT 25	/* fixed 32MiB grain */
+
+#define I82975X_DRB		0x100	/* DRAM Row Boundary (8b x 8)
+					 *
+					 * 7   set to 1 in highest DRB of
+					 *	channel if 4GB in ch.
+					 * 6:2 upper boundary of rank in
+					 *	32MB grains
+					 * 1:0 set to 0
+					 */
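+/* Illustrative decode: a DRB byte of 0x08 places the rank's upper
+ * boundary at 8 << I82975X_DRB_SHIFT, i.e. 8 x 32MiB = 256MiB.
+ */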
+#define I82975X_DRB_CH0R0		0x100
+#define I82975X_DRB_CH0R1		0x101
+#define I82975X_DRB_CH0R2		0x102
+#define I82975X_DRB_CH0R3		0x103
+#define I82975X_DRB_CH1R0		0x180
+#define I82975X_DRB_CH1R1		0x181
+#define I82975X_DRB_CH1R2		0x182
+#define I82975X_DRB_CH1R3		0x183
+
+
+#define I82975X_DRA		0x108	/* DRAM Row Attribute (4b x 8)
+					 *  defines the PAGE SIZE to be used
+					 *	for the rank
+					 *  7    reserved
+					 *  6:4  row attr of odd rank, i.e. 1
+					 *  3    reserved
+					 *  2:0  row attr of even rank, i.e. 0
+					 *
+					 * 000 = unpopulated
+					 * 001 = reserved
+					 * 010 = 4KiB
+					 * 011 = 8KiB
+					 * 100 = 16KiB
+					 * others = reserved
+					 */
+#define I82975X_DRA_CH0R01		0x108
+#define I82975X_DRA_CH0R23		0x109
+#define I82975X_DRA_CH1R01		0x188
+#define I82975X_DRA_CH1R23		0x189
+
+
+#define I82975X_BNKARC	0x10e /* Type of device in each rank - Bank Arch (16b)
+					 *
+					 * 15:8  reserved
+					 * 7:6  Rank 3 architecture
+					 * 5:4  Rank 2 architecture
+					 * 3:2  Rank 1 architecture
+					 * 1:0  Rank 0 architecture
+					 *
+					 * 00 => x16 devices; i.e 4 banks
+					 * 01 => x8  devices; i.e 8 banks
+					 */
+#define I82975X_C0BNKARC	0x10e
+#define I82975X_C1BNKARC	0x18e
+
+
+
+#define I82975X_DRC		0x120 /* DRAM Controller Mode0 (32b)
+					 *
+					 * 31:30 reserved
+					 * 29    init complete
+					 * 28:11 reserved, according to Intel
+					 *    22:21 number of channels
+					 *		00=1 01=2 in 82875
+					 *		seems to be ECC mode
+					 *		bits in 82975 in Asus
+					 *		P5W
+					 *	 19:18 Data Integ Mode
+					 *		00=none 01=ECC in 82875
+					 * 10:8  refresh mode
+					 *  7    reserved
+					 *  6:4  mode select
+					 *  3:2  reserved
+					 *  1:0  DRAM type 10=Second Revision
+					 *		DDR2 SDRAM
+					 *         00, 01, 11 reserved
+					 */
+#define I82975X_DRC_CH0M0		0x120
+#define I82975X_DRC_CH1M0		0x1A0
+
+
+#define I82975X_DRC_M1	0x124 /* DRAM Controller Mode1 (32b)
+					 * 31	0=Standard Address Map
+					 *	1=Enhanced Address Map
+					 * 30:0	reserved
+					 */
+
+#define I82975X_DRC_CH0M1		0x124
+#define I82975X_DRC_CH1M1		0x1A4
+
+enum i82975x_chips {
+	I82975X = 0,
+};
+
+struct i82975x_pvt {
+	void __iomem *mch_window;
+};
+
+struct i82975x_dev_info {
+	const char *ctl_name;
+};
+
+struct i82975x_error_info {
+	u16 errsts;
+	u32 eap;
+	u8 des;
+	u8 derrsyn;
+	u16 errsts2;
+	u8 chan;		/* the channel is bit 0 of EAP */
+	u8 xeap;		/* extended eap bit */
+};
+
+static const struct i82975x_dev_info i82975x_devs[] = {
+	[I82975X] = {
+		.ctl_name = "i82975x"
+	},
+};
+
+static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code
+					 * has already registered a driver
+					 */
+
+static int i82975x_registered = 1;
+
+static void i82975x_get_error_info(struct mem_ctl_info *mci,
+		struct i82975x_error_info *info)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(mci->dev);
+
+	/*
+	 * This is a mess because there is no atomic way to read all the
+	 * registers at once and the registers can transition from CE being
+	 * overwritten by UE.
+	 */
+	pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts);
+	pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
+	pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
+	pci_read_config_byte(pdev, I82975X_DES, &info->des);
+	pci_read_config_byte(pdev, I82975X_DERRSYN, &info->derrsyn);
+	pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts2);
+
+	pci_write_bits16(pdev, I82975X_ERRSTS, 0x0003, 0x0003);
+
+	/*
+	 * If the error is the same for both reads then the first set
+	 * of reads is valid.  If there is a change then there is a CE
+	 * with no info and the second set of reads is valid and should
+	 * be the UE info.
+	 */
+	if (!(info->errsts2 & 0x0003))
+		return;
+
+	if ((info->errsts ^ info->errsts2) & 0x0003) {
+		pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
+		pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
+		pci_read_config_byte(pdev, I82975X_DES, &info->des);
+		pci_read_config_byte(pdev, I82975X_DERRSYN,
+				&info->derrsyn);
+	}
+}
+
+static int i82975x_process_error_info(struct mem_ctl_info *mci,
+		struct i82975x_error_info *info, int handle_errors)
+{
+	int row, multi_chan, chan;
+
+	multi_chan = mci->csrows[0].nr_channels - 1;
+
+	if (!(info->errsts2 & 0x0003))
+		return 0;
+
+	if (!handle_errors)
+		return 1;
+
+	if ((info->errsts ^ info->errsts2) & 0x0003) {
+		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		info->errsts = info->errsts2;
+	}
+
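+	/* Trace with a hypothetical EAP of 0x00002001 and XEAP bit 0 clear:
+	 * chan = 1, eap >>= 1 leaves 0x1000, and with 4KiB pages the final
+	 * eap >>= PAGE_SHIFT yields page 1.  A set XEAP bit would supply
+	 * address bit 32 (inserted as bit 31 after the shift).
+	 */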
+	chan = info->eap & 1;
+	info->eap >>= 1;
+	if (info->xeap)
+		info->eap |= 0x80000000;
+	info->eap >>= PAGE_SHIFT;
+	row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+	if (info->errsts & 0x0002)
+		edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE");
+	else
+		edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+				multi_chan ? chan : 0,
+				"i82975x CE");
+
+	return 1;
+}
+
+static void i82975x_check(struct mem_ctl_info *mci)
+{
+	struct i82975x_error_info info;
+
+	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+	i82975x_get_error_info(mci, &info);
+	i82975x_process_error_info(mci, &info, 1);
+}
+
+/* Return 1 if dual channel mode is active.  Else return 0. */
+static int dual_channel_active(void __iomem *mch_window)
+{
+	/*
+	 * We treat interleaved-symmetric configuration as dual-channel - EAP's
+	 * bit-0 giving the channel of the error location.
+	 *
+	 * All other configurations are treated as single channel - the EAP's
+	 * bit-0 will resolve ok in symmetric area of mixed
+	 * (symmetric/asymmetric) configurations
+	 */
+	u8	drb[4][2];
+	int	row;
+	int	dualch;
+
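+	/* Example: if both channels report DRBs {0x20, 0x40, 0x40, 0x40},
+	 * every rank pair matches and dualch stays 1 (dual channel); any
+	 * mismatch clears it and terminates the loop early.
+	 */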
+	for (dualch = 1, row = 0; dualch && (row < 4); row++) {
+		drb[row][0] = readb(mch_window + I82975X_DRB + row);
+		drb[row][1] = readb(mch_window + I82975X_DRB + row + 0x80);
+		dualch = dualch && (drb[row][0] == drb[row][1]);
+	}
+	return dualch;
+}
+
+static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
+{
+	/*
+	 * ASUS P5W DH either does not program this register or programs
+	 * it wrong!
+	 * ECC is possible on i82975x ONLY with DEV_X8 which should mean 'val'
+	 * for each rank should be 01b - the LSB of the word should be 0x55;
+	 * but it reads 0!
+	 */
+	return DEV_X8;
+}
+
+static void i82975x_init_csrows(struct mem_ctl_info *mci,
+		struct pci_dev *pdev, void __iomem *mch_window)
+{
+	struct csrow_info *csrow;
+	unsigned long last_cumul_size;
+	u8 value;
+	u32 cumul_size;
+	int index;
+
+	last_cumul_size = 0;
+
+	/*
+	 * 82875 comment:
+	 * The dram row boundary (DRB) reg values are boundary address
+	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
+	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
+	 * contain the total memory contained in all eight rows.
+	 *
+	 * FIXME:
+	 *  EDAC currently works for Dual-channel Interleaved configuration.
+	 *  Other configurations, which the chip supports, need fixing/testing.
+	 *
+	 */
+
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+
+		value = readb(mch_window + I82975X_DRB + index +
+					((index >= 4) ? 0x80 : 0));
+		cumul_size = value;
+		cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
+		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+			cumul_size);
+		if (cumul_size == last_cumul_size)
+			continue;	/* not populated */
+
+		csrow->first_page = last_cumul_size;
+		csrow->last_page = cumul_size - 1;
+		csrow->nr_pages = cumul_size - last_cumul_size;
+		last_cumul_size = cumul_size;
+		csrow->grain = 1 << 7;	/* I82975X_EAP has 128B resolution */
+		csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */
+		csrow->dtype = i82975x_dram_type(mch_window, index);
+		csrow->edac_mode = EDAC_SECDED; /* only supported */
+	}
+}
+
+/* #define  i82975x_DEBUG_IOMEM */
+
+#ifdef i82975x_DEBUG_IOMEM
+static void i82975x_print_dram_timings(void __iomem *mch_window)
+{
+	/*
+	 * The register meanings are from Intel specs;
+	 * (shows 13-5-5-5 for 800-DDR2)
+	 * Asus P5W Bios reports 15-5-4-4
+	 * What's your religion?
+	 */
+	static const int caslats[4] = { 5, 4, 3, 6 };
+	u32	dtreg[2];
+
+	dtreg[0] = readl(mch_window + 0x114);
+	dtreg[1] = readl(mch_window + 0x194);
+	i82975x_printk(KERN_INFO, "DRAM Timings :     Ch0    Ch1\n"
+		"                RAS Active Min = %d     %d\n"
+		"                CAS latency    =  %d      %d\n"
+		"                RAS to CAS     =  %d      %d\n"
+		"                RAS precharge  =  %d      %d\n",
+		(dtreg[0] >> 19) & 0x0f,
+			(dtreg[1] >> 19) & 0x0f,
+		caslats[(dtreg[0] >> 8) & 0x03],
+			caslats[(dtreg[1] >> 8) & 0x03],
+		((dtreg[0] >> 4) & 0x07) + 2,
+			((dtreg[1] >> 4) & 0x07) + 2,
+		(dtreg[0] & 0x07) + 2,
+			(dtreg[1] & 0x07) + 2
+	);
+
+}
+#endif
+
+static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	int rc = -ENODEV;
+	struct mem_ctl_info *mci;
+	struct i82975x_pvt *pvt;
+	void __iomem *mch_window;
+	u32 mchbar;
+	u32 drc[2];
+	struct i82975x_error_info discard;
+	int	chans;
+#ifdef i82975x_DEBUG_IOMEM
+	u8 c0drb[4];
+	u8 c1drb[4];
+#endif
+
+	debugf0("%s()\n", __func__);
+
+	pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar);
+	if (!(mchbar & 1)) {
+		debugf3("%s(): failed, MCHBAR disabled!\n", __func__);
+		goto fail0;
+	}
+	mchbar &= 0xffffc000;	/* bits 31:14 used for 16K window */
+	mch_window = ioremap_nocache(mchbar, 0x1000);
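+	/* Only the first 4KiB of the 16KiB MCHBAR window is mapped here;
+	 * every register offset used below (up to I82975X_DRC_CH1M1 at
+	 * 0x1a4) falls inside that range.
+	 */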
+
+#ifdef i82975x_DEBUG_IOMEM
+	i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n",
+					mchbar, mch_window);
+
+	c0drb[0] = readb(mch_window + I82975X_DRB_CH0R0);
+	c0drb[1] = readb(mch_window + I82975X_DRB_CH0R1);
+	c0drb[2] = readb(mch_window + I82975X_DRB_CH0R2);
+	c0drb[3] = readb(mch_window + I82975X_DRB_CH0R3);
+	c1drb[0] = readb(mch_window + I82975X_DRB_CH1R0);
+	c1drb[1] = readb(mch_window + I82975X_DRB_CH1R1);
+	c1drb[2] = readb(mch_window + I82975X_DRB_CH1R2);
+	c1drb[3] = readb(mch_window + I82975X_DRB_CH1R3);
+	i82975x_printk(KERN_INFO, "DRBCH0R0 = 0x%02x\n", c0drb[0]);
+	i82975x_printk(KERN_INFO, "DRBCH0R1 = 0x%02x\n", c0drb[1]);
+	i82975x_printk(KERN_INFO, "DRBCH0R2 = 0x%02x\n", c0drb[2]);
+	i82975x_printk(KERN_INFO, "DRBCH0R3 = 0x%02x\n", c0drb[3]);
+	i82975x_printk(KERN_INFO, "DRBCH1R0 = 0x%02x\n", c1drb[0]);
+	i82975x_printk(KERN_INFO, "DRBCH1R1 = 0x%02x\n", c1drb[1]);
+	i82975x_printk(KERN_INFO, "DRBCH1R2 = 0x%02x\n", c1drb[2]);
+	i82975x_printk(KERN_INFO, "DRBCH1R3 = 0x%02x\n", c1drb[3]);
+#endif
+
+	drc[0] = readl(mch_window + I82975X_DRC_CH0M0);
+	drc[1] = readl(mch_window + I82975X_DRC_CH1M0);
+#ifdef i82975x_DEBUG_IOMEM
+	i82975x_printk(KERN_INFO, "DRC_CH0 = %0x, %s\n", drc[0],
+			((drc[0] >> 21) & 3) == 1 ?
+				"ECC enabled" : "ECC disabled");
+	i82975x_printk(KERN_INFO, "DRC_CH1 = %0x, %s\n", drc[1],
+			((drc[1] >> 21) & 3) == 1 ?
+				"ECC enabled" : "ECC disabled");
+
+	i82975x_printk(KERN_INFO, "C0 BNKARC = %0x\n",
+		readw(mch_window + I82975X_C0BNKARC));
+	i82975x_printk(KERN_INFO, "C1 BNKARC = %0x\n",
+		readw(mch_window + I82975X_C1BNKARC));
+	i82975x_print_dram_timings(mch_window);
+	goto fail1;
+#endif
+	if (!(((drc[0] >> 21) & 3) == 1 || ((drc[1] >> 21) & 3) == 1)) {
+		i82975x_printk(KERN_INFO, "ECC disabled on both channels.\n");
+		goto fail1;
+	}
+
+	chans = dual_channel_active(mch_window) + 1;
+
+	/* assuming only one controller, index thus is 0 */
+	mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans),
+					chans, 0);
+	if (!mci) {
+		rc = -ENOMEM;
+		goto fail1;
+	}
+
+	debugf3("%s(): init mci\n", __func__);
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = I82975X_REVISION;
+	mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
+	mci->edac_check = i82975x_check;
+	mci->ctl_page_to_phys = NULL;
+	debugf3("%s(): init pvt\n", __func__);
+	pvt = (struct i82975x_pvt *) mci->pvt_info;
+	pvt->mch_window = mch_window;
+	i82975x_init_csrows(mci, pdev, mch_window);
+	i82975x_get_error_info(mci, &discard);  /* clear counters */
+
+	/* finalize this instance of memory controller with edac core */
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail2;
+	}
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+	return 0;
+
+fail2:
+	edac_mc_free(mci);
+
+fail1:
+	iounmap(mch_window);
+fail0:
+	return rc;
+}
+
+/* returns 0 on success, or negative on error */
+static int __devinit i82975x_init_one(struct pci_dev *pdev,
+		const struct pci_device_id *ent)
+{
+	int rc;
+
+	debugf0("%s()\n", __func__);
+
+	if (pci_enable_device(pdev) < 0)
+		return -EIO;
+
+	rc = i82975x_probe1(pdev, ent->driver_data);
+
+	if (mci_pdev == NULL)
+		mci_pdev = pci_dev_get(pdev);
+
+	return rc;
+}
+
+static void __devexit i82975x_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+	struct i82975x_pvt *pvt;
+
+	debugf0("%s()\n", __func__);
+
+	mci = edac_mc_del_mc(&pdev->dev);
+	if (mci == NULL)
+		return;
+
+	pvt = mci->pvt_info;
+	if (pvt->mch_window)
+		iounmap(pvt->mch_window);
+
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
+	{
+		PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		I82975X
+	},
+	{
+		0,
+	}	/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl);
+
+static struct pci_driver i82975x_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = i82975x_init_one,
+	.remove = __devexit_p(i82975x_remove_one),
+	.id_table = i82975x_pci_tbl,
+};
+
+static int __init i82975x_init(void)
+{
+	int pci_rc;
+
+	debugf3("%s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	pci_rc = pci_register_driver(&i82975x_driver);
+	if (pci_rc < 0)
+		goto fail0;
+
+	if (mci_pdev == NULL) {
+		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_82975_0, NULL);
+
+		if (!mci_pdev) {
+			debugf0("i82975x pci_get_device fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+
+		pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl);
+
+		if (pci_rc < 0) {
+			debugf0("i82975x init fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+	}
+
+	return 0;
+
+fail1:
+	pci_unregister_driver(&i82975x_driver);
+
+fail0:
+	if (mci_pdev != NULL)
+		pci_dev_put(mci_pdev);
+
+	return pci_rc;
+}
+
+static void __exit i82975x_exit(void)
+{
+	debugf3("%s()\n", __func__);
+
+	pci_unregister_driver(&i82975x_driver);
+
+	if (!i82975x_registered) {
+		i82975x_remove_one(mci_pdev);
+		pci_dev_put(mci_pdev);
+	}
+}
+
+module_init(i82975x_init);
+module_exit(i82975x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
+MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
new file mode 100644
index 0000000..cf27402
--- /dev/null
+++ b/drivers/edac/mpc85xx_edac.c
@@ -0,0 +1,1088 @@
+/*
+ * Freescale MPC85xx Memory Controller kernel module
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/edac.h>
+#include <linux/smp.h>
+
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include "edac_module.h"
+#include "edac_core.h"
+#include "mpc85xx_edac.h"
+
+static int edac_dev_idx;
+#ifdef CONFIG_PCI
+static int edac_pci_idx;
+#endif
+static int edac_mc_idx;
+
+static u32 orig_ddr_err_disable;
+static u32 orig_ddr_err_sbe;
+
+/*
+ * PCI Err defines
+ */
+#ifdef CONFIG_PCI
+static u32 orig_pci_err_cap_dr;
+static u32 orig_pci_err_en;
+#endif
+
+static u32 orig_l2_err_disable;
+#ifdef CONFIG_MPC85xx
+static u32 orig_hid1[2];
+#endif
+
+/************************ MC SYSFS parts ***********************************/
+
+static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci,
+					      char *data)
+{
+	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+	return sprintf(data, "0x%08x",
+		       in_be32(pdata->mc_vbase +
+			       MPC85XX_MC_DATA_ERR_INJECT_HI));
+}
+
+static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci,
+					      char *data)
+{
+	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+	return sprintf(data, "0x%08x",
+		       in_be32(pdata->mc_vbase +
+			       MPC85XX_MC_DATA_ERR_INJECT_LO));
+}
+
+static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data)
+{
+	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+	return sprintf(data, "0x%08x",
+		       in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
+}
+
+static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci,
+					       const char *data, size_t count)
+{
+	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+	if (isdigit(*data)) {
+		out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
+			 simple_strtoul(data, NULL, 0));
+		return count;
+	}
+	return 0;
+}
+
+static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci,
+					       const char *data, size_t count)
+{
+	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+	if (isdigit(*data)) {
+		out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
+			 simple_strtoul(data, NULL, 0));
+		return count;
+	}
+	return 0;
+}
+
+static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci,
+					    const char *data, size_t count)
+{
+	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+	if (isdigit(*data)) {
+		out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
+			 simple_strtoul(data, NULL, 0));
+		return count;
+	}
+	return 0;
+}
+
+static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = {
+	{
+	 .attr = {
+		  .name = "inject_data_hi",
+		  .mode = (S_IRUGO | S_IWUSR)
+		  },
+	 .show = mpc85xx_mc_inject_data_hi_show,
+	 .store = mpc85xx_mc_inject_data_hi_store},
+	{
+	 .attr = {
+		  .name = "inject_data_lo",
+		  .mode = (S_IRUGO | S_IWUSR)
+		  },
+	 .show = mpc85xx_mc_inject_data_lo_show,
+	 .store = mpc85xx_mc_inject_data_lo_store},
+	{
+	 .attr = {
+		  .name = "inject_ctrl",
+		  .mode = (S_IRUGO | S_IWUSR)
+		  },
+	 .show = mpc85xx_mc_inject_ctrl_show,
+	 .store = mpc85xx_mc_inject_ctrl_store},
+
+	/* End of list */
+	{
+	 .attr = {.name = NULL}
+	 }
+};
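+/* These attributes surface under the MC's sysfs directory (e.g.
+ * /sys/devices/system/edac/mc/mc0/inject_data_hi).  Writing a numeric
+ * string such as "0x55aa55aa" programs the corresponding injection
+ * register; reading it back reports the current register value.
+ */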
+
+static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+{
+	mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes;
+}
+
+/**************************** PCI Err device ***************************/
+#ifdef CONFIG_PCI
+
+static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
+{
+	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+	u32 err_detect;
+
+	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
+
+	/* master aborts can happen during PCI config cycles */
+	if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
+		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
+		return;
+	}
+
+	printk(KERN_ERR "PCI error(s) detected\n");
+	printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
+
+	printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
+	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
+	printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
+	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
+	printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
+	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
+	printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
+	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
+	printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
+	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));
+
+	/* clear error bits */
+	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
+
+	if (err_detect & PCI_EDE_PERR_MASK)
+		edac_pci_handle_pe(pci, pci->ctl_name);
+
+	if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
+		edac_pci_handle_npe(pci, pci->ctl_name);
+}
+
+static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
+{
+	struct edac_pci_ctl_info *pci = dev_id;
+	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+	u32 err_detect;
+
+	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
+
+	if (!err_detect)
+		return IRQ_NONE;
+
+	mpc85xx_pci_check(pci);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit mpc85xx_pci_err_probe(struct of_device *op,
+					   const struct of_device_id *match)
+{
+	struct edac_pci_ctl_info *pci;
+	struct mpc85xx_pci_pdata *pdata;
+	struct resource r;
+	int res = 0;
+
+	if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
+		return -ENOMEM;
+
+	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
+	if (!pci)
+		return -ENOMEM;
+
+	pdata = pci->pvt_info;
+	pdata->name = "mpc85xx_pci_err";
+	pdata->irq = NO_IRQ;
+	dev_set_drvdata(&op->dev, pci);
+	pci->dev = &op->dev;
+	pci->mod_name = EDAC_MOD_STR;
+	pci->ctl_name = pdata->name;
+	pci->dev_name = dev_name(&op->dev);
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		pci->edac_check = mpc85xx_pci_check;
+
+	pdata->edac_idx = edac_pci_idx++;
+
+	res = of_address_to_resource(op->node, 0, &r);
+	if (res) {
+		printk(KERN_ERR "%s: Unable to get resource for "
+		       "PCI err regs\n", __func__);
+		goto err;
+	}
+
+	/* we only need the error registers */
+	r.start += 0xe00;
+
+	if (!devm_request_mem_region(&op->dev, r.start,
+					r.end - r.start + 1, pdata->name)) {
+		printk(KERN_ERR "%s: Error while requesting mem region\n",
+		       __func__);
+		res = -EBUSY;
+		goto err;
+	}
+
+	pdata->pci_vbase = devm_ioremap(&op->dev, r.start,
+					r.end - r.start + 1);
+	if (!pdata->pci_vbase) {
+		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
+		res = -ENOMEM;
+		goto err;
+	}
+
+	orig_pci_err_cap_dr =
+	    in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
+
+	/* PCI master abort is expected during config cycles */
+	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
+
+	orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
+
+	/* disable master abort reporting */
+	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
+
+	/* clear error bits */
+	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
+
+	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+		debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+		goto err;
+	}
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		pdata->irq = irq_of_parse_and_map(op->node, 0);
+		res = devm_request_irq(&op->dev, pdata->irq,
+				       mpc85xx_pci_isr, IRQF_DISABLED,
+				       "[EDAC] PCI err", pci);
+		if (res < 0) {
+			printk(KERN_ERR
+			       "%s: Unable to request irq %d for "
+			       "MPC85xx PCI err\n", __func__, pdata->irq);
+			irq_dispose_mapping(pdata->irq);
+			res = -ENODEV;
+			goto err2;
+		}
+
+		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
+		       pdata->irq);
+	}
+
+	devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
+	debugf3("%s(): success\n", __func__);
+	printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
+
+	return 0;
+
+err2:
+	edac_pci_del_device(&op->dev);
+err:
+	edac_pci_free_ctl_info(pci);
+	devres_release_group(&op->dev, mpc85xx_pci_err_probe);
+	return res;
+}
+
+static int mpc85xx_pci_err_remove(struct of_device *op)
+{
+	struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
+	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+
+	debugf0("%s()\n", __func__);
+
+	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
+		 orig_pci_err_cap_dr);
+
+	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
+
+	edac_pci_del_device(pci->dev);
+
+	if (edac_op_state == EDAC_OPSTATE_INT)
+		irq_dispose_mapping(pdata->irq);
+
+	edac_pci_free_ctl_info(pci);
+
+	return 0;
+}
+
+static struct of_device_id mpc85xx_pci_err_of_match[] = {
+	{
+	 .compatible = "fsl,mpc8540-pcix",
+	 },
+	{
+	 .compatible = "fsl,mpc8540-pci",
+	},
+	{},
+};
+
+static struct of_platform_driver mpc85xx_pci_err_driver = {
+	.owner = THIS_MODULE,
+	.name = "mpc85xx_pci_err",
+	.match_table = mpc85xx_pci_err_of_match,
+	.probe = mpc85xx_pci_err_probe,
+	.remove = __devexit_p(mpc85xx_pci_err_remove),
+	.driver = {
+		   .name = "mpc85xx_pci_err",
+		   .owner = THIS_MODULE,
+		   },
+};
+
+#endif				/* CONFIG_PCI */
+
+/**************************** L2 Err device ***************************/
+
+/************************ L2 SYSFS parts ***********************************/
+
+static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
+					      *edac_dev, char *data)
+{
+	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+	return sprintf(data, "0x%08x",
+		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
+}
+
+static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
+					      *edac_dev, char *data)
+{
+	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+	return sprintf(data, "0x%08x",
+		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
+}
+
+static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
+					   *edac_dev, char *data)
+{
+	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+	return sprintf(data, "0x%08x",
+		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
+}
+
+static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
+					       *edac_dev, const char *data,
+					       size_t count)
+{
+	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+	if (isdigit(*data)) {
+		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
+			 simple_strtoul(data, NULL, 0));
+		return count;
+	}
+	return 0;
+}
+
+static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
+					       *edac_dev, const char *data,
+					       size_t count)
+{
+	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+	if (isdigit(*data)) {
+		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
+			 simple_strtoul(data, NULL, 0));
+		return count;
+	}
+	return 0;
+}
+
+static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
+					    *edac_dev, const char *data,
+					    size_t count)
+{
+	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+	if (isdigit(*data)) {
+		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
+			 simple_strtoul(data, NULL, 0));
+		return count;
+	}
+	return 0;
+}
+
+static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
+	{
+	 .attr = {
+		  .name = "inject_data_hi",
+		  .mode = (S_IRUGO | S_IWUSR)
+		  },
+	 .show = mpc85xx_l2_inject_data_hi_show,
+	 .store = mpc85xx_l2_inject_data_hi_store},
+	{
+	 .attr = {
+		  .name = "inject_data_lo",
+		  .mode = (S_IRUGO | S_IWUSR)
+		  },
+	 .show = mpc85xx_l2_inject_data_lo_show,
+	 .store = mpc85xx_l2_inject_data_lo_store},
+	{
+	 .attr = {
+		  .name = "inject_ctrl",
+		  .mode = (S_IRUGO | S_IWUSR)
+		  },
+	 .show = mpc85xx_l2_inject_ctrl_show,
+	 .store = mpc85xx_l2_inject_ctrl_store},
+
+	/* End of list */
+	{
+	 .attr = {.name = NULL}
+	 }
+};
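+/* As with the MC injection files above, these attributes appear under
+ * the L2 edac_device's sysfs directory and accept numeric strings
+ * parsed with simple_strtoul() (base 0, so "0x..." values work).
+ */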
+
+static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
+					    *edac_dev)
+{
+	edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
+}
+
+/***************************** L2 ops ***********************************/
+
+static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
+{
+	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+	u32 err_detect;
+
+	err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
+
+	if (!(err_detect & L2_EDE_MASK))
+		return;
+
+	printk(KERN_ERR "ECC Error in CPU L2 cache\n");
+	printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
+	printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
+	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
+	printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
+	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
+	printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
+	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
+	printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
+	       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
+	printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
+	       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));
+
+	/* clear error detect register */
+	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);
+
+	if (err_detect & L2_EDE_CE_MASK)
+		edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+
+	if (err_detect & L2_EDE_UE_MASK)
+		edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
+{
+	struct edac_device_ctl_info *edac_dev = dev_id;
+	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+	u32 err_detect;
+
+	err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
+
+	if (!(err_detect & L2_EDE_MASK))
+		return IRQ_NONE;
+
+	mpc85xx_l2_check(edac_dev);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit mpc85xx_l2_err_probe(struct of_device *op,
+					  const struct of_device_id *match)
+{
+	struct edac_device_ctl_info *edac_dev;
+	struct mpc85xx_l2_pdata *pdata;
+	struct resource r;
+	int res;
+
+	if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
+		return -ENOMEM;
+
+	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+					      "cpu", 1, "L", 1, 2, NULL, 0,
+					      edac_dev_idx);
+	if (!edac_dev) {
+		devres_release_group(&op->dev, mpc85xx_l2_err_probe);
+		return -ENOMEM;
+	}
+
+	pdata = edac_dev->pvt_info;
+	pdata->name = "mpc85xx_l2_err";
+	pdata->irq = NO_IRQ;
+	edac_dev->dev = &op->dev;
+	dev_set_drvdata(edac_dev->dev, edac_dev);
+	edac_dev->ctl_name = pdata->name;
+	edac_dev->dev_name = pdata->name;
+
+	res = of_address_to_resource(op->node, 0, &r);
+	if (res) {
+		printk(KERN_ERR "%s: Unable to get resource for "
+		       "L2 err regs\n", __func__);
+		goto err;
+	}
+
+	/* we only need the error registers */
+	r.start += 0xe00;
+
+	if (!devm_request_mem_region(&op->dev, r.start,
+				     r.end - r.start + 1, pdata->name)) {
+		printk(KERN_ERR "%s: Error while requesting mem region\n",
+		       __func__);
+		res = -EBUSY;
+		goto err;
+	}
+
+	pdata->l2_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
+	if (!pdata->l2_vbase) {
+		printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
+		res = -ENOMEM;
+		goto err;
+	}
+
+	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);
+
+	orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);
+
+	/* clear the err_dis */
+	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);
+
+	edac_dev->mod_name = EDAC_MOD_STR;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		edac_dev->edac_check = mpc85xx_l2_check;
+
+	mpc85xx_set_l2_sysfs_attributes(edac_dev);
+
+	pdata->edac_idx = edac_dev_idx++;
+
+	if (edac_device_add_device(edac_dev) > 0) {
+		debugf3("%s(): failed edac_device_add_device()\n", __func__);
+		goto err;
+	}
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		pdata->irq = irq_of_parse_and_map(op->node, 0);
+		res = devm_request_irq(&op->dev, pdata->irq,
+				       mpc85xx_l2_isr, IRQF_DISABLED,
+				       "[EDAC] L2 err", edac_dev);
+		if (res < 0) {
+			printk(KERN_ERR
+			       "%s: Unable to request irq %d for "
+			       "MPC85xx L2 err\n", __func__, pdata->irq);
+			irq_dispose_mapping(pdata->irq);
+			res = -ENODEV;
+			goto err2;
+		}
+
+		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
+		       pdata->irq);
+
+		edac_dev->op_state = OP_RUNNING_INTERRUPT;
+
+		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
+	}
+
+	devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
+
+	debugf3("%s(): success\n", __func__);
+	printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
+
+	return 0;
+
+err2:
+	edac_device_del_device(&op->dev);
+err:
+	devres_release_group(&op->dev, mpc85xx_l2_err_probe);
+	edac_device_free_ctl_info(edac_dev);
+	return res;
+}
+
+static int mpc85xx_l2_err_remove(struct of_device *op)
+{
+	struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
+	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+
+	debugf0("%s()\n", __func__);
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
+		irq_dispose_mapping(pdata->irq);
+	}
+
+	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
+	edac_device_del_device(&op->dev);
+	edac_device_free_ctl_info(edac_dev);
+	return 0;
+}
+
+static struct of_device_id mpc85xx_l2_err_of_match[] = {
+/* deprecate the fsl,85.. forms in the future, 2.6.30? */
+	{ .compatible = "fsl,8540-l2-cache-controller", },
+	{ .compatible = "fsl,8541-l2-cache-controller", },
+	{ .compatible = "fsl,8544-l2-cache-controller", },
+	{ .compatible = "fsl,8548-l2-cache-controller", },
+	{ .compatible = "fsl,8555-l2-cache-controller", },
+	{ .compatible = "fsl,8568-l2-cache-controller", },
+	{ .compatible = "fsl,mpc8536-l2-cache-controller", },
+	{ .compatible = "fsl,mpc8540-l2-cache-controller", },
+	{ .compatible = "fsl,mpc8541-l2-cache-controller", },
+	{ .compatible = "fsl,mpc8544-l2-cache-controller", },
+	{ .compatible = "fsl,mpc8548-l2-cache-controller", },
+	{ .compatible = "fsl,mpc8555-l2-cache-controller", },
+	{ .compatible = "fsl,mpc8560-l2-cache-controller", },
+	{ .compatible = "fsl,mpc8568-l2-cache-controller", },
+	{ .compatible = "fsl,mpc8572-l2-cache-controller", },
+	{ .compatible = "fsl,p2020-l2-cache-controller", },
+	{},
+};
+
+static struct of_platform_driver mpc85xx_l2_err_driver = {
+	.owner = THIS_MODULE,
+	.name = "mpc85xx_l2_err",
+	.match_table = mpc85xx_l2_err_of_match,
+	.probe = mpc85xx_l2_err_probe,
+	.remove = mpc85xx_l2_err_remove,
+	.driver = {
+		   .name = "mpc85xx_l2_err",
+		   .owner = THIS_MODULE,
+		   },
+};
+
+/**************************** MC Err device ***************************/
+
+static void mpc85xx_mc_check(struct mem_ctl_info *mci)
+{
+	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+	struct csrow_info *csrow;
+	u32 err_detect;
+	u32 syndrome;
+	u32 err_addr;
+	u32 pfn;
+	int row_index;
+
+	err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
+	if (!err_detect)
+		return;
+
+	mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
+			  err_detect);
+
+	/* no more processing if not ECC bit errors */
+	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
+		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
+		return;
+	}
+
+	syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);
+	err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
+	pfn = err_addr >> PAGE_SHIFT;
+
+	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
+		csrow = &mci->csrows[row_index];
+		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
+			break;
+	}
+
+	mpc85xx_mc_printk(mci, KERN_ERR, "Capture Data High: %#8.8x\n",
+			  in_be32(pdata->mc_vbase +
+				  MPC85XX_MC_CAPTURE_DATA_HI));
+	mpc85xx_mc_printk(mci, KERN_ERR, "Capture Data Low: %#8.8x\n",
+			  in_be32(pdata->mc_vbase +
+				  MPC85XX_MC_CAPTURE_DATA_LO));
+	mpc85xx_mc_printk(mci, KERN_ERR, "syndrome: %#8.8x\n", syndrome);
+	mpc85xx_mc_printk(mci, KERN_ERR, "err addr: %#8.8x\n", err_addr);
+	mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
+
+	/* we are out of range */
+	if (row_index == mci->nr_csrows)
+		mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
+
+	if (err_detect & DDR_EDE_SBE)
+		edac_mc_handle_ce(mci, pfn, err_addr & PAGE_MASK,
+				  syndrome, row_index, 0, mci->ctl_name);
+
+	if (err_detect & DDR_EDE_MBE)
+		edac_mc_handle_ue(mci, pfn, err_addr & PAGE_MASK,
+				  row_index, mci->ctl_name);
+
+	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
+}
+
+static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
+{
+	struct mem_ctl_info *mci = dev_id;
+	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+	u32 err_detect;
+
+	err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
+	if (!err_detect)
+		return IRQ_NONE;
+
+	mpc85xx_mc_check(mci);
+
+	return IRQ_HANDLED;
+}
+
+static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
+{
+	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+	struct csrow_info *csrow;
+	u32 sdram_ctl;
+	u32 sdtype;
+	enum mem_type mtype;
+	u32 cs_bnds;
+	int index;
+
+	sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
+
+	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
+	if (sdram_ctl & DSC_RD_EN) {
+		switch (sdtype) {
+		case DSC_SDTYPE_DDR:
+			mtype = MEM_RDDR;
+			break;
+		case DSC_SDTYPE_DDR2:
+			mtype = MEM_RDDR2;
+			break;
+		case DSC_SDTYPE_DDR3:
+			mtype = MEM_RDDR3;
+			break;
+		default:
+			mtype = MEM_UNKNOWN;
+			break;
+		}
+	} else {
+		switch (sdtype) {
+		case DSC_SDTYPE_DDR:
+			mtype = MEM_DDR;
+			break;
+		case DSC_SDTYPE_DDR2:
+			mtype = MEM_DDR2;
+			break;
+		case DSC_SDTYPE_DDR3:
+			mtype = MEM_DDR3;
+			break;
+		default:
+			mtype = MEM_UNKNOWN;
+			break;
+		}
+	}
+
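+	/* CS_BNDS decode example: a value of 0x0000000f means start unit 0
+	 * and end unit 0xf, one unit being 2^24 bytes (16MiB) -- a 256MiB
+	 * rank.  Note that a rank exactly one unit long would read back
+	 * start == end and be treated as unpopulated.
+	 */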
+	for (index = 0; index < mci->nr_csrows; index++) {
+		u32 start;
+		u32 end;
+
+		csrow = &mci->csrows[index];
+		cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
+				  (index * MPC85XX_MC_CS_BNDS_OFS));
+
+		start = (cs_bnds & 0xffff0000) >> 16;
+		end   = (cs_bnds & 0x0000ffff);
+
+		if (start == end)
+			continue;	/* not populated */
+
+		start <<= (24 - PAGE_SHIFT);
+		end   <<= (24 - PAGE_SHIFT);
+		end    |= (1 << (24 - PAGE_SHIFT)) - 1;
+
+		/* start/end are already page numbers after the shifts above */
+		csrow->first_page = start;
+		csrow->last_page = end;
+		csrow->nr_pages = end + 1 - start;
+		csrow->grain = 8;
+		csrow->mtype = mtype;
+		csrow->dtype = DEV_UNKNOWN;
+		if (sdram_ctl & DSC_X32_EN)
+			csrow->dtype = DEV_X32;
+		csrow->edac_mode = EDAC_SECDED;
+	}
+}
+
+static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
+					  const struct of_device_id *match)
+{
+	struct mem_ctl_info *mci;
+	struct mpc85xx_mc_pdata *pdata;
+	struct resource r;
+	u32 sdram_ctl;
+	int res;
+
+	if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
+		return -ENOMEM;
+
+	mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
+	if (!mci) {
+		devres_release_group(&op->dev, mpc85xx_mc_err_probe);
+		return -ENOMEM;
+	}
+
+	pdata = mci->pvt_info;
+	pdata->name = "mpc85xx_mc_err";
+	pdata->irq = NO_IRQ;
+	mci->dev = &op->dev;
+	pdata->edac_idx = edac_mc_idx++;
+	dev_set_drvdata(mci->dev, mci);
+	mci->ctl_name = pdata->name;
+	mci->dev_name = pdata->name;
+
+	res = of_address_to_resource(op->node, 0, &r);
+	if (res) {
+		printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
+		       __func__);
+		goto err;
+	}
+
+	if (!devm_request_mem_region(&op->dev, r.start,
+				     r.end - r.start + 1, pdata->name)) {
+		printk(KERN_ERR "%s: Error while requesting mem region\n",
+		       __func__);
+		res = -EBUSY;
+		goto err;
+	}
+
+	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
+	if (!pdata->mc_vbase) {
+		printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
+		res = -ENOMEM;
+		goto err;
+	}
+
+	sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
+	if (!(sdram_ctl & DSC_ECC_EN)) {
+		/* no ECC */
+		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+	debugf3("%s(): init mci\n", __func__);
+	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
+	    MEM_FLAG_DDR | MEM_FLAG_DDR2;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_SECDED;
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = MPC85XX_REVISION;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		mci->edac_check = mpc85xx_mc_check;
+
+	mci->ctl_page_to_phys = NULL;
+
+	mci->scrub_mode = SCRUB_SW_SRC;
+
+	mpc85xx_set_mc_sysfs_attributes(mci);
+
+	mpc85xx_init_csrows(mci);
+
+#ifdef CONFIG_EDAC_DEBUG
+	edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
+#endif
+
+	/* store the original error disable bits */
+	orig_ddr_err_disable =
+	    in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
+	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);
+
+	/* clear all error bits */
+	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
+
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
+			 DDR_EIE_MBEE | DDR_EIE_SBEE);
+
+		/* store the original error management threshold */
+		orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
+					   MPC85XX_MC_ERR_SBE) & 0xff0000;
+
+		/* set threshold to 1 error per interrupt (SBE count field, bits 23:16) */
+		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
+
+		/* register interrupts */
+		pdata->irq = irq_of_parse_and_map(op->node, 0);
+		res = devm_request_irq(&op->dev, pdata->irq,
+				       mpc85xx_mc_isr,
+				       IRQF_DISABLED | IRQF_SHARED,
+				       "[EDAC] MC err", mci);
+		if (res < 0) {
+			printk(KERN_ERR "%s: Unable to request irq %d for "
+			       "MPC85xx DRAM ERR\n", __func__, pdata->irq);
+			irq_dispose_mapping(pdata->irq);
+			res = -ENODEV;
+			goto err2;
+		}
+
+		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
+		       pdata->irq);
+	}
+
+	devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
+	debugf3("%s(): success\n", __func__);
+	printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
+
+	return 0;
+
+err2:
+	edac_mc_del_mc(&op->dev);
+err:
+	devres_release_group(&op->dev, mpc85xx_mc_err_probe);
+	edac_mc_free(mci);
+	return res;
+}
+
+static int mpc85xx_mc_err_remove(struct of_device *op)
+{
+	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
+	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+
+	debugf0("%s()\n", __func__);
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
+		irq_dispose_mapping(pdata->irq);
+	}
+
+	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
+		 orig_ddr_err_disable);
+	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
+
+	edac_mc_del_mc(&op->dev);
+	edac_mc_free(mci);
+	return 0;
+}
+
+static struct of_device_id mpc85xx_mc_err_of_match[] = {
+/* deprecate the fsl,85.. forms in the future, 2.6.30? */
+	{ .compatible = "fsl,8540-memory-controller", },
+	{ .compatible = "fsl,8541-memory-controller", },
+	{ .compatible = "fsl,8544-memory-controller", },
+	{ .compatible = "fsl,8548-memory-controller", },
+	{ .compatible = "fsl,8555-memory-controller", },
+	{ .compatible = "fsl,8568-memory-controller", },
+	{ .compatible = "fsl,mpc8536-memory-controller", },
+	{ .compatible = "fsl,mpc8540-memory-controller", },
+	{ .compatible = "fsl,mpc8541-memory-controller", },
+	{ .compatible = "fsl,mpc8544-memory-controller", },
+	{ .compatible = "fsl,mpc8548-memory-controller", },
+	{ .compatible = "fsl,mpc8555-memory-controller", },
+	{ .compatible = "fsl,mpc8560-memory-controller", },
+	{ .compatible = "fsl,mpc8568-memory-controller", },
+	{ .compatible = "fsl,mpc8572-memory-controller", },
+	{ .compatible = "fsl,mpc8349-memory-controller", },
+	{ .compatible = "fsl,p2020-memory-controller", },
+	{},
+};
+
+static struct of_platform_driver mpc85xx_mc_err_driver = {
+	.owner = THIS_MODULE,
+	.name = "mpc85xx_mc_err",
+	.match_table = mpc85xx_mc_err_of_match,
+	.probe = mpc85xx_mc_err_probe,
+	.remove = mpc85xx_mc_err_remove,
+	.driver = {
+		   .name = "mpc85xx_mc_err",
+		   .owner = THIS_MODULE,
+		   },
+};
+
+#ifdef CONFIG_MPC85xx
+static void __init mpc85xx_mc_clear_rfxe(void *data)
+{
+	orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
+	mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~0x20000));
+}
+#endif
+
+static int __init mpc85xx_mc_init(void)
+{
+	int res = 0;
+
+	printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
+	       "(C) 2006 Montavista Software\n");
+
+	/* make sure error reporting method is sane */
+	switch (edac_op_state) {
+	case EDAC_OPSTATE_POLL:
+	case EDAC_OPSTATE_INT:
+		break;
+	default:
+		edac_op_state = EDAC_OPSTATE_INT;
+		break;
+	}
+
+	res = of_register_platform_driver(&mpc85xx_mc_err_driver);
+	if (res)
+		printk(KERN_WARNING EDAC_MOD_STR " MC fails to register\n");
+
+	res = of_register_platform_driver(&mpc85xx_l2_err_driver);
+	if (res)
+		printk(KERN_WARNING EDAC_MOD_STR " L2 fails to register\n");
+
+#ifdef CONFIG_PCI
+	res = of_register_platform_driver(&mpc85xx_pci_err_driver);
+	if (res)
+		printk(KERN_WARNING EDAC_MOD_STR " PCI fails to register\n");
+#endif
+
+#ifdef CONFIG_MPC85xx
+	/*
+	 * need to clear HID1[RFXE] to disable machine check int
+	 * so we can catch it
+	 */
+	if (edac_op_state == EDAC_OPSTATE_INT)
+		on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
+#endif
+
+	return 0;
+}
+
+module_init(mpc85xx_mc_init);
+
+#ifdef CONFIG_MPC85xx
+static void __exit mpc85xx_mc_restore_hid1(void *data)
+{
+	mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
+}
+#endif
+
+static void __exit mpc85xx_mc_exit(void)
+{
+#ifdef CONFIG_MPC85xx
+	on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
+#endif
+#ifdef CONFIG_PCI
+	of_unregister_platform_driver(&mpc85xx_pci_err_driver);
+#endif
+	of_unregister_platform_driver(&mpc85xx_l2_err_driver);
+	of_unregister_platform_driver(&mpc85xx_mc_err_driver);
+}
+
+module_exit(mpc85xx_mc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Montavista Software, Inc.");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state,
+		 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
new file mode 100644
index 0000000..52432ee
--- /dev/null
+++ b/drivers/edac/mpc85xx_edac.h
@@ -0,0 +1,163 @@
+/*
+ * Freescale MPC85xx Memory Controller kernel module
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _MPC85XX_EDAC_H_
+#define _MPC85XX_EDAC_H_
+
+#define MPC85XX_REVISION " Ver: 2.0.0 " __DATE__
+#define EDAC_MOD_STR	"MPC85xx_edac"
+
+#define mpc85xx_printk(level, fmt, arg...) \
+	edac_printk(level, "MPC85xx", fmt, ##arg)
+
+#define mpc85xx_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "MPC85xx", fmt, ##arg)
+
+/*
+ * DRAM error defines
+ */
+
+/* DDR_SDRAM_CFG */
+#define MPC85XX_MC_DDR_SDRAM_CFG	0x0110
+#define MPC85XX_MC_CS_BNDS_0		0x0000
+#define MPC85XX_MC_CS_BNDS_1		0x0008
+#define MPC85XX_MC_CS_BNDS_2		0x0010
+#define MPC85XX_MC_CS_BNDS_3		0x0018
+#define MPC85XX_MC_CS_BNDS_OFS		0x0008
+
+#define MPC85XX_MC_DATA_ERR_INJECT_HI	0x0e00
+#define MPC85XX_MC_DATA_ERR_INJECT_LO	0x0e04
+#define MPC85XX_MC_ECC_ERR_INJECT	0x0e08
+#define MPC85XX_MC_CAPTURE_DATA_HI	0x0e20
+#define MPC85XX_MC_CAPTURE_DATA_LO	0x0e24
+#define MPC85XX_MC_CAPTURE_ECC		0x0e28
+#define MPC85XX_MC_ERR_DETECT		0x0e40
+#define MPC85XX_MC_ERR_DISABLE		0x0e44
+#define MPC85XX_MC_ERR_INT_EN		0x0e48
+#define MPC85XX_MC_CAPTURE_ATRIBUTES	0x0e4c
+#define MPC85XX_MC_CAPTURE_ADDRESS	0x0e50
+#define MPC85XX_MC_ERR_SBE		0x0e58
+
+#define DSC_MEM_EN	0x80000000
+#define DSC_ECC_EN	0x20000000
+#define DSC_RD_EN	0x10000000
+
+#define DSC_SDTYPE_MASK		0x07000000
+
+#define DSC_SDTYPE_DDR		0x02000000
+#define DSC_SDTYPE_DDR2		0x03000000
+#define DSC_SDTYPE_DDR3		0x07000000
+#define DSC_X32_EN	0x00000020
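+
+/*
+ * Illustrative decode (not from real hardware): a DDR_SDRAM_CFG value
+ * of 0xb3000020 has DSC_MEM_EN, DSC_ECC_EN and DSC_RD_EN set, an SDTYPE
+ * field of 0x03000000 (DDR2) and DSC_X32_EN set, i.e. a registered,
+ * ECC-capable DDR2 interface built from x32 devices, which
+ * mpc85xx_init_csrows() reports as MEM_RDDR2/DEV_X32.
+ */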
+
+/* Err_Int_En */
+#define DDR_EIE_MSEE	0x1	/* memory select */
+#define DDR_EIE_SBEE	0x4	/* single-bit ECC error */
+#define DDR_EIE_MBEE	0x8	/* multi-bit ECC error */
+
+/* Err_Detect */
+#define DDR_EDE_MSE		0x1	/* memory select */
+#define DDR_EDE_SBE		0x4	/* single-bit ECC error */
+#define DDR_EDE_MBE		0x8	/* multi-bit ECC error */
+#define DDR_EDE_MME		0x80000000	/* multiple memory errors */
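+
+/*
+ * For illustration: an ERR_DETECT reading of 0x80000004 would mean a
+ * single-bit ECC error (DDR_EDE_SBE) with DDR_EDE_MME also set, i.e.
+ * more than one error captured since the register was last cleared.
+ */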
+
+/* Err_Disable */
+#define DDR_EDI_MSED	0x1	/* memory select disable */
+#define	DDR_EDI_SBED	0x4	/* single-bit ECC error disable */
+#define	DDR_EDI_MBED	0x8	/* multi-bit ECC error disable */
+
+/*
+ * L2 Err defines
+ */
+#define MPC85XX_L2_ERRINJHI	0x0000
+#define MPC85XX_L2_ERRINJLO	0x0004
+#define MPC85XX_L2_ERRINJCTL	0x0008
+#define MPC85XX_L2_CAPTDATAHI	0x0020
+#define MPC85XX_L2_CAPTDATALO	0x0024
+#define MPC85XX_L2_CAPTECC	0x0028
+#define MPC85XX_L2_ERRDET	0x0040
+#define MPC85XX_L2_ERRDIS	0x0044
+#define MPC85XX_L2_ERRINTEN	0x0048
+#define MPC85XX_L2_ERRATTR	0x004c
+#define MPC85XX_L2_ERRADDR	0x0050
+#define MPC85XX_L2_ERRCTL	0x0058
+
+/* Error Interrupt Enable */
+#define L2_EIE_L2CFGINTEN	0x1
+#define L2_EIE_SBECCINTEN	0x4
+#define L2_EIE_MBECCINTEN	0x8
+#define L2_EIE_TPARINTEN	0x10
+#define L2_EIE_MASK	(L2_EIE_L2CFGINTEN | L2_EIE_SBECCINTEN | \
+			L2_EIE_MBECCINTEN | L2_EIE_TPARINTEN)
+
+/* Error Detect */
+#define L2_EDE_L2CFGERR		0x1
+#define L2_EDE_SBECCERR		0x4
+#define L2_EDE_MBECCERR		0x8
+#define L2_EDE_TPARERR		0x10
+#define L2_EDE_MULL2ERR		0x80000000
+
+#define L2_EDE_CE_MASK	L2_EDE_SBECCERR
+#define L2_EDE_UE_MASK	(L2_EDE_L2CFGERR | L2_EDE_MBECCERR | \
+			L2_EDE_TPARERR)
+#define L2_EDE_MASK	(L2_EDE_L2CFGERR | L2_EDE_SBECCERR | \
+			L2_EDE_MBECCERR | L2_EDE_TPARERR | L2_EDE_MULL2ERR)
+
+/*
+ * PCI Err defines
+ */
+#define PCI_EDE_TOE			0x00000001
+#define PCI_EDE_SCM			0x00000002
+#define PCI_EDE_IRMSV			0x00000004
+#define PCI_EDE_ORMSV			0x00000008
+#define PCI_EDE_OWMSV			0x00000010
+#define PCI_EDE_TGT_ABRT		0x00000020
+#define PCI_EDE_MST_ABRT		0x00000040
+#define PCI_EDE_TGT_PERR		0x00000080
+#define PCI_EDE_MST_PERR		0x00000100
+#define PCI_EDE_RCVD_SERR		0x00000200
+#define PCI_EDE_ADDR_PERR		0x00000400
+#define PCI_EDE_MULTI_ERR		0x80000000
+
+#define PCI_EDE_PERR_MASK	(PCI_EDE_TGT_PERR | PCI_EDE_MST_PERR | \
+				PCI_EDE_ADDR_PERR)
+
+#define MPC85XX_PCI_ERR_DR		0x0000
+#define MPC85XX_PCI_ERR_CAP_DR		0x0004
+#define MPC85XX_PCI_ERR_EN		0x0008
+#define MPC85XX_PCI_ERR_ATTRIB		0x000c
+#define MPC85XX_PCI_ERR_ADDR		0x0010
+#define MPC85XX_PCI_ERR_EXT_ADDR	0x0014
+#define MPC85XX_PCI_ERR_DL		0x0018
+#define MPC85XX_PCI_ERR_DH		0x001c
+#define MPC85XX_PCI_GAS_TIMR		0x0020
+#define MPC85XX_PCI_PCIX_TIMR		0x0024
+
+struct mpc85xx_mc_pdata {
+	char *name;
+	int edac_idx;
+	void __iomem *mc_vbase;
+	int irq;
+};
+
+struct mpc85xx_l2_pdata {
+	char *name;
+	int edac_idx;
+	void __iomem *l2_vbase;
+	int irq;
+};
+
+struct mpc85xx_pci_pdata {
+	char *name;
+	int edac_idx;
+	void __iomem *pci_vbase;
+	int irq;
+};
+
+#endif
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
new file mode 100644
index 0000000..a6b9fec
--- /dev/null
+++ b/drivers/edac/mv64x60_edac.c
@@ -0,0 +1,890 @@
+/*
+ * Marvell MV64x60 Memory Controller kernel module for PPC platforms
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+#include "mv64x60_edac.h"
+
+static const char *mv64x60_ctl_name = "MV64x60";
+static int edac_dev_idx;
+static int edac_pci_idx;
+static int edac_mc_idx;
+
+/*********************** PCI err device **********************************/
+#ifdef CONFIG_PCI
+static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
+{
+	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
+	u32 cause;
+
+	cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+	if (!cause)
+		return;
+
+	printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
+	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+	printk(KERN_ERR "Address Low: 0x%08x\n",
+	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
+	printk(KERN_ERR "Address High: 0x%08x\n",
+	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
+	printk(KERN_ERR "Attribute: 0x%08x\n",
+	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
+	printk(KERN_ERR "Command: 0x%08x\n",
+	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
+	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause);
+
+	if (cause & MV64X60_PCI_PE_MASK)
+		edac_pci_handle_pe(pci, pci->ctl_name);
+
+	if (!(cause & MV64X60_PCI_PE_MASK))
+		edac_pci_handle_npe(pci, pci->ctl_name);
+}
+
+static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
+{
+	struct edac_pci_ctl_info *pci = dev_id;
+	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
+	u32 val;
+
+	val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+	if (!val)
+		return IRQ_NONE;
+
+	mv64x60_pci_check(pci);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
+ * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as
+ * well.  IOW, don't set bit 0.
+ */
+
+/* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */
+static int __init mv64x60_pci_fixup(struct platform_device *pdev)
+{
+	struct resource *r;
+	void __iomem *pci_serr;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!r) {
+		printk(KERN_ERR "%s: Unable to get resource for "
+		       "PCI err regs\n", __func__);
+		return -ENOENT;
+	}
+
+	pci_serr = ioremap(r->start, resource_size(r));
+	if (!pci_serr)
+		return -ENOMEM;
+
+	out_le32(pci_serr, in_le32(pci_serr) & ~0x1);
+	iounmap(pci_serr);
+
+	return 0;
+}
+
+static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
+{
+	struct edac_pci_ctl_info *pci;
+	struct mv64x60_pci_pdata *pdata;
+	struct resource *r;
+	int res = 0;
+
+	if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL))
+		return -ENOMEM;
+
+	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err");
+	if (!pci) {
+		devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
+		return -ENOMEM;
+	}
+
+	pdata = pci->pvt_info;
+
+	pdata->pci_hose = pdev->id;
+	pdata->name = "mv64x60_pci_err";
+	pdata->irq = NO_IRQ;
+	platform_set_drvdata(pdev, pci);
+	pci->dev = &pdev->dev;
+	pci->dev_name = dev_name(&pdev->dev);
+	pci->mod_name = EDAC_MOD_STR;
+	pci->ctl_name = pdata->name;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		pci->edac_check = mv64x60_pci_check;
+
+	pdata->edac_idx = edac_pci_idx++;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		printk(KERN_ERR "%s: Unable to get resource for "
+		       "PCI err regs\n", __func__);
+		res = -ENOENT;
+		goto err;
+	}
+
+	if (!devm_request_mem_region(&pdev->dev,
+				     r->start,
+				     resource_size(r),
+				     pdata->name)) {
+		printk(KERN_ERR "%s: Error while requesting mem region\n",
+		       __func__);
+		res = -EBUSY;
+		goto err;
+	}
+
+	pdata->pci_vbase = devm_ioremap(&pdev->dev,
+					r->start,
+					resource_size(r));
+	if (!pdata->pci_vbase) {
+		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
+		res = -ENOMEM;
+		goto err;
+	}
+
+	res = mv64x60_pci_fixup(pdev);
+	if (res < 0) {
+		printk(KERN_ERR "%s: PCI fixup failed\n", __func__);
+		goto err;
+	}
+
+	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
+	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
+	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
+		 MV64X60_PCIx_ERR_MASK_VAL);
+
+	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+		debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		pdata->irq = platform_get_irq(pdev, 0);
+		res = devm_request_irq(&pdev->dev,
+				       pdata->irq,
+				       mv64x60_pci_isr,
+				       IRQF_DISABLED,
+				       "[EDAC] PCI err",
+				       pci);
+		if (res < 0) {
+			printk(KERN_ERR "%s: Unable to request irq %d for "
+			       "MV64x60 PCI ERR\n", __func__, pdata->irq);
+			res = -ENODEV;
+			goto err2;
+		}
+		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
+		       pdata->irq);
+	}
+
+	devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+
+	return 0;
+
+err2:
+	edac_pci_del_device(&pdev->dev);
+err:
+	edac_pci_free_ctl_info(pci);
+	devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
+	return res;
+}
+
+static int mv64x60_pci_err_remove(struct platform_device *pdev)
+{
+	struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
+
+	debugf0("%s()\n", __func__);
+
+	edac_pci_del_device(&pdev->dev);
+
+	edac_pci_free_ctl_info(pci);
+
+	return 0;
+}
+
+static struct platform_driver mv64x60_pci_err_driver = {
+	.probe = mv64x60_pci_err_probe,
+	.remove = __devexit_p(mv64x60_pci_err_remove),
+	.driver = {
+		   .name = "mv64x60_pci_err",
+	}
+};
+
+#endif /* CONFIG_PCI */
+
+/*********************** SRAM err device **********************************/
+static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
+{
+	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
+	u32 cause;
+
+	cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+	if (!cause)
+		return;
+
+	printk(KERN_ERR "Error in internal SRAM\n");
+	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+	printk(KERN_ERR "Address Low: 0x%08x\n",
+	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
+	printk(KERN_ERR "Address High: 0x%08x\n",
+	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
+	printk(KERN_ERR "Data Low: 0x%08x\n",
+	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
+	printk(KERN_ERR "Data High: 0x%08x\n",
+	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
+	printk(KERN_ERR "Parity: 0x%08x\n",
+	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
+	out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+
+	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
+{
+	struct edac_device_ctl_info *edac_dev = dev_id;
+	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
+	u32 cause;
+
+	cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+	if (!cause)
+		return IRQ_NONE;
+
+	mv64x60_sram_check(edac_dev);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
+{
+	struct edac_device_ctl_info *edac_dev;
+	struct mv64x60_sram_pdata *pdata;
+	struct resource *r;
+	int res = 0;
+
+	if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL))
+		return -ENOMEM;
+
+	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+					      "sram", 1, NULL, 0, 0, NULL, 0,
+					      edac_dev_idx);
+	if (!edac_dev) {
+		devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
+		return -ENOMEM;
+	}
+
+	pdata = edac_dev->pvt_info;
+	pdata->name = "mv64x60_sram_err";
+	pdata->irq = NO_IRQ;
+	edac_dev->dev = &pdev->dev;
+	platform_set_drvdata(pdev, edac_dev);
+	edac_dev->dev_name = dev_name(&pdev->dev);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		printk(KERN_ERR "%s: Unable to get resource for "
+		       "SRAM err regs\n", __func__);
+		res = -ENOENT;
+		goto err;
+	}
+
+	if (!devm_request_mem_region(&pdev->dev,
+				     r->start,
+				     resource_size(r),
+				     pdata->name)) {
+		printk(KERN_ERR "%s: Error while requesting mem region\n",
+		       __func__);
+		res = -EBUSY;
+		goto err;
+	}
+
+	pdata->sram_vbase = devm_ioremap(&pdev->dev,
+					 r->start,
+					 resource_size(r));
+	if (!pdata->sram_vbase) {
+		printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
+		       __func__);
+		res = -ENOMEM;
+		goto err;
+	}
+
+	/* setup SRAM err registers */
+	out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+
+	edac_dev->mod_name = EDAC_MOD_STR;
+	edac_dev->ctl_name = pdata->name;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		edac_dev->edac_check = mv64x60_sram_check;
+
+	pdata->edac_idx = edac_dev_idx++;
+
+	if (edac_device_add_device(edac_dev) > 0) {
+		debugf3("%s(): failed edac_device_add_device()\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		pdata->irq = platform_get_irq(pdev, 0);
+		res = devm_request_irq(&pdev->dev,
+				       pdata->irq,
+				       mv64x60_sram_isr,
+				       IRQF_DISABLED,
+				       "[EDAC] SRAM err",
+				       edac_dev);
+		if (res < 0) {
+			printk(KERN_ERR
+			       "%s: Unable to request irq %d for "
+			       "MV64x60 SRAM ERR\n", __func__, pdata->irq);
+			res = -ENODEV;
+			goto err2;
+		}
+
+		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n",
+		       pdata->irq);
+	}
+
+	devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+
+	return 0;
+
+err2:
+	edac_device_del_device(&pdev->dev);
+err:
+	devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
+	edac_device_free_ctl_info(edac_dev);
+	return res;
+}
+
+static int mv64x60_sram_err_remove(struct platform_device *pdev)
+{
+	struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
+
+	debugf0("%s()\n", __func__);
+
+	edac_device_del_device(&pdev->dev);
+	edac_device_free_ctl_info(edac_dev);
+
+	return 0;
+}
+
+static struct platform_driver mv64x60_sram_err_driver = {
+	.probe = mv64x60_sram_err_probe,
+	.remove = mv64x60_sram_err_remove,
+	.driver = {
+		   .name = "mv64x60_sram_err",
+	}
+};
+
+/*********************** CPU err device **********************************/
+static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
+{
+	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
+	u32 cause;
+
+	cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+	    MV64x60_CPU_CAUSE_MASK;
+	if (!cause)
+		return;
+
+	printk(KERN_ERR "Error on CPU interface\n");
+	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+	printk(KERN_ERR "Address Low: 0x%08x\n",
+	       in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
+	printk(KERN_ERR "Address High: 0x%08x\n",
+	       in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
+	printk(KERN_ERR "Data Low: 0x%08x\n",
+	       in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
+	printk(KERN_ERR "Data High: 0x%08x\n",
+	       in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
+	printk(KERN_ERR "Parity: 0x%08x\n",
+	       in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
+	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
+
+	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
+{
+	struct edac_device_ctl_info *edac_dev = dev_id;
+	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
+	u32 cause;
+
+	cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+	    MV64x60_CPU_CAUSE_MASK;
+	if (!cause)
+		return IRQ_NONE;
+
+	mv64x60_cpu_check(edac_dev);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
+{
+	struct edac_device_ctl_info *edac_dev;
+	struct resource *r;
+	struct mv64x60_cpu_pdata *pdata;
+	int res = 0;
+
+	if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL))
+		return -ENOMEM;
+
+	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+					      "cpu", 1, NULL, 0, 0, NULL, 0,
+					      edac_dev_idx);
+	if (!edac_dev) {
+		devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
+		return -ENOMEM;
+	}
+
+	pdata = edac_dev->pvt_info;
+	pdata->name = "mv64x60_cpu_err";
+	pdata->irq = NO_IRQ;
+	edac_dev->dev = &pdev->dev;
+	platform_set_drvdata(pdev, edac_dev);
+	edac_dev->dev_name = dev_name(&pdev->dev);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		printk(KERN_ERR "%s: Unable to get resource for "
+		       "CPU err regs\n", __func__);
+		res = -ENOENT;
+		goto err;
+	}
+
+	if (!devm_request_mem_region(&pdev->dev,
+				     r->start,
+				     resource_size(r),
+				     pdata->name)) {
+		printk(KERN_ERR "%s: Error while requesting mem region\n",
+		       __func__);
+		res = -EBUSY;
+		goto err;
+	}
+
+	pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
+					   r->start,
+					   resource_size(r));
+	if (!pdata->cpu_vbase[0]) {
+		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
+		res = -ENOMEM;
+		goto err;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!r) {
+		printk(KERN_ERR "%s: Unable to get resource for "
+		       "CPU err regs\n", __func__);
+		res = -ENOENT;
+		goto err;
+	}
+
+	if (!devm_request_mem_region(&pdev->dev,
+				     r->start,
+				     resource_size(r),
+				     pdata->name)) {
+		printk(KERN_ERR "%s: Error while requesting mem region\n",
+		       __func__);
+		res = -EBUSY;
+		goto err;
+	}
+
+	pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
+					   r->start,
+					   resource_size(r));
+	if (!pdata->cpu_vbase[1]) {
+		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
+		res = -ENOMEM;
+		goto err;
+	}
+
+	/* setup CPU err registers */
+	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
+	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0);
+	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff);
+
+	edac_dev->mod_name = EDAC_MOD_STR;
+	edac_dev->ctl_name = pdata->name;
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		edac_dev->edac_check = mv64x60_cpu_check;
+
+	pdata->edac_idx = edac_dev_idx++;
+
+	if (edac_device_add_device(edac_dev) > 0) {
+		debugf3("%s(): failed edac_device_add_device()\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		pdata->irq = platform_get_irq(pdev, 0);
+		res = devm_request_irq(&pdev->dev,
+				       pdata->irq,
+				       mv64x60_cpu_isr,
+				       IRQF_DISABLED,
+				       "[EDAC] CPU err",
+				       edac_dev);
+		if (res < 0) {
+			printk(KERN_ERR
+			       "%s: Unable to request irq %d for MV64x60 "
+			       "CPU ERR\n", __func__, pdata->irq);
+			res = -ENODEV;
+			goto err2;
+		}
+
+		printk(KERN_INFO EDAC_MOD_STR
+		       " acquired irq %d for CPU Err\n", pdata->irq);
+	}
+
+	devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+
+	return 0;
+
+err2:
+	edac_device_del_device(&pdev->dev);
+err:
+	devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
+	edac_device_free_ctl_info(edac_dev);
+	return res;
+}
+
+static int mv64x60_cpu_err_remove(struct platform_device *pdev)
+{
+	struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
+
+	debugf0("%s()\n", __func__);
+
+	edac_device_del_device(&pdev->dev);
+	edac_device_free_ctl_info(edac_dev);
+	return 0;
+}
+
+static struct platform_driver mv64x60_cpu_err_driver = {
+	.probe = mv64x60_cpu_err_probe,
+	.remove = mv64x60_cpu_err_remove,
+	.driver = {
+		   .name = "mv64x60_cpu_err",
+	}
+};
+
+/*********************** DRAM err device **********************************/
+
+static void mv64x60_mc_check(struct mem_ctl_info *mci)
+{
+	struct mv64x60_mc_pdata *pdata = mci->pvt_info;
+	u32 reg;
+	u32 err_addr;
+	u32 sdram_ecc;
+	u32 comp_ecc;
+	u32 syndrome;
+
+	reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+	if (!reg)
+		return;
+
+	err_addr = reg & ~0x3;
+	sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
+	comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
+	syndrome = sdram_ecc ^ comp_ecc;
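+	/*
+	 * A nonzero syndrome marks the check bits on which the received
+	 * and computed ECC values disagree.
+	 */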
+
+	/* bit 0 of the ECC err addr reg clear: single-bit error, corrected by HW */
+	if (!(reg & 0x1))
+		edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT,
+				  err_addr & PAGE_MASK, syndrome, 0, 0,
+				  mci->ctl_name);
+	else	/* 2 bit error, UE */
+		edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT,
+				  err_addr & PAGE_MASK, 0, mci->ctl_name);
+
+	/* clear the error */
+	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
+}
+
+static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
+{
+	struct mem_ctl_info *mci = dev_id;
+	struct mv64x60_mc_pdata *pdata = mci->pvt_info;
+	u32 reg;
+
+	reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+	if (!reg)
+		return IRQ_NONE;
+
+	/* writing 0's to the ECC err addr in check function clears irq */
+	mv64x60_mc_check(mci);
+
+	return IRQ_HANDLED;
+}
+
+static void get_total_mem(struct mv64x60_mc_pdata *pdata)
+{
+	struct device_node *np = NULL;
+	const unsigned int *reg;
+
+	np = of_find_node_by_type(NULL, "memory");
+	if (!np)
+		return;
+
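+	/* assumes a flat <base size> reg property (one address and one size cell) */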
+	reg = of_get_property(np, "reg", NULL);
+	if (reg)
+		pdata->total_mem = reg[1];
+
+	of_node_put(np);
+}
+
+static void mv64x60_init_csrows(struct mem_ctl_info *mci,
+				struct mv64x60_mc_pdata *pdata)
+{
+	struct csrow_info *csrow;
+	u32 devtype;
+	u32 ctl;
+
+	get_total_mem(pdata);
+
+	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+
+	csrow = &mci->csrows[0];
+	csrow->first_page = 0;
+	csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT;
+	csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+	csrow->grain = 8;
+
+	csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
+
+	devtype = (ctl >> 20) & 0x3;
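+	/* SDRAM_CONFIG bits [21:20]; e.g. a value of 0x3 selects x4 parts */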
+	switch (devtype) {
+	case 0x0:
+		csrow->dtype = DEV_X32;
+		break;
+	case 0x2:		/* could be X8 too, but no way to tell */
+		csrow->dtype = DEV_X16;
+		break;
+	case 0x3:
+		csrow->dtype = DEV_X4;
+		break;
+	default:
+		csrow->dtype = DEV_UNKNOWN;
+		break;
+	}
+
+	csrow->edac_mode = EDAC_SECDED;
+}
+
+static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
+{
+	struct mem_ctl_info *mci;
+	struct mv64x60_mc_pdata *pdata;
+	struct resource *r;
+	u32 ctl;
+	int res = 0;
+
+	if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
+		return -ENOMEM;
+
+	mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx);
+	if (!mci) {
+		printk(KERN_ERR "%s: No memory for MC err\n", __func__);
+		devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
+		return -ENOMEM;
+	}
+
+	pdata = mci->pvt_info;
+	mci->dev = &pdev->dev;
+	platform_set_drvdata(pdev, mci);
+	pdata->name = "mv64x60_mc_err";
+	pdata->irq = NO_IRQ;
+	mci->dev_name = dev_name(&pdev->dev);
+	pdata->edac_idx = edac_mc_idx++;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		printk(KERN_ERR "%s: Unable to get resource for "
+		       "MC err regs\n", __func__);
+		res = -ENOENT;
+		goto err;
+	}
+
+	if (!devm_request_mem_region(&pdev->dev,
+				     r->start,
+				     resource_size(r),
+				     pdata->name)) {
+		printk(KERN_ERR "%s: Error while requesting mem region\n",
+		       __func__);
+		res = -EBUSY;
+		goto err;
+	}
+
+	pdata->mc_vbase = devm_ioremap(&pdev->dev,
+				       r->start,
+				       resource_size(r));
+	if (!pdata->mc_vbase) {
+		printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
+		res = -ENOMEM;
+		goto err;
+	}
+
+	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+	if (!(ctl & MV64X60_SDRAM_ECC)) {
+		/* Non-ECC RAM? */
+		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+	debugf3("%s(): init mci\n", __func__);
+	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_SECDED;
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = MV64x60_REVISION;
+	mci->ctl_name = mv64x60_ctl_name;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		mci->edac_check = mv64x60_mc_check;
+
+	mci->ctl_page_to_phys = NULL;
+
+	mci->scrub_mode = SCRUB_SW_SRC;
+
+	mv64x60_init_csrows(mci, pdata);
+
+	/* setup MC registers */
+	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
+	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
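+	/* bits [23:16] of ECC_CNTL appear to be an error threshold; set to 1 */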
+	ctl = (ctl & 0xff00ffff) | 0x10000;
+	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
+
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		res = -ENODEV;
+		goto err;
+	}
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		/* acquire interrupt that reports errors */
+		pdata->irq = platform_get_irq(pdev, 0);
+		res = devm_request_irq(&pdev->dev,
+				       pdata->irq,
+				       mv64x60_mc_isr,
+				       IRQF_DISABLED,
+				       "[EDAC] MC err",
+				       mci);
+		if (res < 0) {
+			printk(KERN_ERR "%s: Unable to request irq %d for "
+			       "MV64x60 DRAM ERR\n", __func__, pdata->irq);
+			res = -ENODEV;
+			goto err2;
+		}
+
+		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n",
+		       pdata->irq);
+	}
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+
+	return 0;
+
+err2:
+	edac_mc_del_mc(&pdev->dev);
+err:
+	devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
+	edac_mc_free(mci);
+	return res;
+}
+
+static int mv64x60_mc_err_remove(struct platform_device *pdev)
+{
+	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+	debugf0("%s()\n", __func__);
+
+	edac_mc_del_mc(&pdev->dev);
+	edac_mc_free(mci);
+	return 0;
+}
+
+static struct platform_driver mv64x60_mc_err_driver = {
+	.probe = mv64x60_mc_err_probe,
+	.remove = mv64x60_mc_err_remove,
+	.driver = {
+		   .name = "mv64x60_mc_err",
+	}
+};
+
+static int __init mv64x60_edac_init(void)
+{
+	int ret = 0;
+
+	printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
+	printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");
+	/* make sure error reporting method is sane */
+	switch (edac_op_state) {
+	case EDAC_OPSTATE_POLL:
+	case EDAC_OPSTATE_INT:
+		break;
+	default:
+		edac_op_state = EDAC_OPSTATE_INT;
+		break;
+	}
+
+	ret = platform_driver_register(&mv64x60_mc_err_driver);
+	if (ret)
+		printk(KERN_WARNING EDAC_MOD_STR " MC err failed to register\n");
+
+	ret = platform_driver_register(&mv64x60_cpu_err_driver);
+	if (ret)
+		printk(KERN_WARNING EDAC_MOD_STR
+			" CPU err failed to register\n");
+
+	ret = platform_driver_register(&mv64x60_sram_err_driver);
+	if (ret)
+		printk(KERN_WARNING EDAC_MOD_STR
+			" SRAM err failed to register\n");
+
+#ifdef CONFIG_PCI
+	ret = platform_driver_register(&mv64x60_pci_err_driver);
+	if (ret)
+		printk(KERN_WARNING EDAC_MOD_STR
+			" PCI err failed to register\n");
+#endif
+
+	return ret;
+}
+module_init(mv64x60_edac_init);
+
+static void __exit mv64x60_edac_exit(void)
+{
+#ifdef CONFIG_PCI
+	platform_driver_unregister(&mv64x60_pci_err_driver);
+#endif
+	platform_driver_unregister(&mv64x60_sram_err_driver);
+	platform_driver_unregister(&mv64x60_cpu_err_driver);
+	platform_driver_unregister(&mv64x60_mc_err_driver);
+}
+module_exit(mv64x60_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Montavista Software, Inc.");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state,
+		 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/edac/mv64x60_edac.h b/drivers/edac/mv64x60_edac.h
new file mode 100644
index 0000000..e042e2d
--- /dev/null
+++ b/drivers/edac/mv64x60_edac.h
@@ -0,0 +1,114 @@
+/*
+ * EDAC defs for Marvell MV64x60 bridge chip
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _MV64X60_EDAC_H_
+#define _MV64X60_EDAC_H_
+
+#define MV64x60_REVISION " Ver: 2.0.0 " __DATE__
+#define EDAC_MOD_STR	"MV64x60_edac"
+
+#define mv64x60_printk(level, fmt, arg...) \
+	edac_printk(level, "MV64x60", fmt, ##arg)
+
+#define mv64x60_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "MV64x60", fmt, ##arg)
+
+/* CPU Error Report Registers */
+#define MV64x60_CPU_ERR_ADDR_LO		0x00	/* 0x0070 */
+#define MV64x60_CPU_ERR_ADDR_HI		0x08	/* 0x0078 */
+#define MV64x60_CPU_ERR_DATA_LO		0x00	/* 0x0128 */
+#define MV64x60_CPU_ERR_DATA_HI		0x08	/* 0x0130 */
+#define MV64x60_CPU_ERR_PARITY		0x10	/* 0x0138 */
+#define MV64x60_CPU_ERR_CAUSE		0x18	/* 0x0140 */
+#define MV64x60_CPU_ERR_MASK		0x20	/* 0x0148 */
+
+#define MV64x60_CPU_CAUSE_MASK		0x07ffffff
+
+/* SRAM Error Report Registers */
+#define MV64X60_SRAM_ERR_CAUSE		0x08	/* 0x0388 */
+#define MV64X60_SRAM_ERR_ADDR_LO	0x10	/* 0x0390 */
+#define MV64X60_SRAM_ERR_ADDR_HI	0x78	/* 0x03f8 */
+#define MV64X60_SRAM_ERR_DATA_LO	0x18	/* 0x0398 */
+#define MV64X60_SRAM_ERR_DATA_HI	0x20	/* 0x03a0 */
+#define MV64X60_SRAM_ERR_PARITY		0x28	/* 0x03a8 */
+
+/* SDRAM Controller Registers */
+#define MV64X60_SDRAM_CONFIG		0x00	/* 0x1400 */
+#define MV64X60_SDRAM_ERR_DATA_HI	0x40	/* 0x1440 */
+#define MV64X60_SDRAM_ERR_DATA_LO	0x44	/* 0x1444 */
+#define MV64X60_SDRAM_ERR_ECC_RCVD	0x48	/* 0x1448 */
+#define MV64X60_SDRAM_ERR_ECC_CALC	0x4c	/* 0x144c */
+#define MV64X60_SDRAM_ERR_ADDR		0x50	/* 0x1450 */
+#define MV64X60_SDRAM_ERR_ECC_CNTL	0x54	/* 0x1454 */
+#define MV64X60_SDRAM_ERR_ECC_ERR_CNT	0x58	/* 0x1458 */
+
+#define MV64X60_SDRAM_REGISTERED	0x20000
+#define MV64X60_SDRAM_ECC		0x40000
+
+#ifdef CONFIG_PCI
+/*
+ * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
+ * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as
+ * well.  IOW, don't set bit 0.
+ */
+#define MV64X60_PCIx_ERR_MASK_VAL	0x00a50c24
+
+/* Register offsets from PCIx error address low register */
+#define MV64X60_PCI_ERROR_ADDR_LO	0x00
+#define MV64X60_PCI_ERROR_ADDR_HI	0x04
+#define MV64X60_PCI_ERROR_ATTR		0x08
+#define MV64X60_PCI_ERROR_CMD		0x10
+#define MV64X60_PCI_ERROR_CAUSE		0x18
+#define MV64X60_PCI_ERROR_MASK		0x1c
+
+#define MV64X60_PCI_ERR_SWrPerr		0x0002
+#define MV64X60_PCI_ERR_SRdPerr		0x0004
+#define	MV64X60_PCI_ERR_MWrPerr		0x0020
+#define MV64X60_PCI_ERR_MRdPerr		0x0040
+
+#define MV64X60_PCI_PE_MASK	(MV64X60_PCI_ERR_SWrPerr | \
+				MV64X60_PCI_ERR_SRdPerr | \
+				MV64X60_PCI_ERR_MWrPerr | \
+				MV64X60_PCI_ERR_MRdPerr)
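+
+/*
+ * For example, a cause register reading MV64X60_PCI_ERR_MWrPerr (0x0020)
+ * matches MV64X60_PCI_PE_MASK and is reported by mv64x60_pci_check() as
+ * a parity error; a cause with none of these bits set is reported as a
+ * non-parity error.
+ */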
+
+struct mv64x60_pci_pdata {
+	int pci_hose;
+	void __iomem *pci_vbase;
+	char *name;
+	int irq;
+	int edac_idx;
+};
+
+#endif				/* CONFIG_PCI */
+
+struct mv64x60_mc_pdata {
+	void __iomem *mc_vbase;
+	int total_mem;
+	char *name;
+	int irq;
+	int edac_idx;
+};
+
+struct mv64x60_cpu_pdata {
+	void __iomem *cpu_vbase[2];
+	char *name;
+	int irq;
+	int edac_idx;
+};
+
+struct mv64x60_sram_pdata {
+	void __iomem *sram_vbase;
+	char *name;
+	int irq;
+	int edac_idx;
+};
+
+#endif
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
new file mode 100644
index 0000000..8e6b91b
--- /dev/null
+++ b/drivers/edac/pasemi_edac.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip memory controllers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define MODULE_NAME "pasemi_edac"
+
+#define MCCFG_MCEN				0x300
+#define   MCCFG_MCEN_MMC_EN			0x00000001
+#define MCCFG_ERRCOR				0x388
+#define   MCCFG_ERRCOR_RNK_FAIL_DET_EN		0x00000100
+#define   MCCFG_ERRCOR_ECC_GEN_EN		0x00000010
+#define   MCCFG_ERRCOR_ECC_CRR_EN		0x00000001
+#define MCCFG_SCRUB				0x384
+#define   MCCFG_SCRUB_RGLR_SCRB_EN		0x00000001
+#define MCDEBUG_ERRCTL1				0x728
+#define   MCDEBUG_ERRCTL1_RFL_LOG_EN		0x00080000
+#define   MCDEBUG_ERRCTL1_MBE_LOG_EN		0x00040000
+#define   MCDEBUG_ERRCTL1_SBE_LOG_EN		0x00020000
+#define MCDEBUG_ERRSTA				0x730
+#define   MCDEBUG_ERRSTA_RFL_STATUS		0x00000004
+#define   MCDEBUG_ERRSTA_MBE_STATUS		0x00000002
+#define   MCDEBUG_ERRSTA_SBE_STATUS		0x00000001
+#define MCDEBUG_ERRCNT1				0x734
+#define   MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO	0x00000080
+#define MCDEBUG_ERRLOG1A			0x738
+#define   MCDEBUG_ERRLOG1A_MERR_TYPE_M		0x30000000
+#define   MCDEBUG_ERRLOG1A_MERR_TYPE_NONE	0x00000000
+#define   MCDEBUG_ERRLOG1A_MERR_TYPE_SBE	0x10000000
+#define   MCDEBUG_ERRLOG1A_MERR_TYPE_MBE	0x20000000
+#define   MCDEBUG_ERRLOG1A_MERR_TYPE_RFL	0x30000000
+#define   MCDEBUG_ERRLOG1A_MERR_BA_M		0x00700000
+#define   MCDEBUG_ERRLOG1A_MERR_BA_S		20
+#define   MCDEBUG_ERRLOG1A_MERR_CS_M		0x00070000
+#define   MCDEBUG_ERRLOG1A_MERR_CS_S		16
+#define   MCDEBUG_ERRLOG1A_SYNDROME_M		0x0000ffff
+#define MCDRAM_RANKCFG				0x114
+#define   MCDRAM_RANKCFG_EN			0x00000001
+#define   MCDRAM_RANKCFG_TYPE_SIZE_M		0x000001c0
+#define   MCDRAM_RANKCFG_TYPE_SIZE_S		6
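+
+/*
+ * Illustration: with 4 KiB pages, a RANKCFG TYPE_SIZE field of 1 selects
+ * a 256 MiB rank, which pasemi_edac_init_csrows() sizes as
+ * 256 << (20 - PAGE_SHIFT) = 65536 pages.
+ */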
+
+#define PASEMI_EDAC_NR_CSROWS			8
+#define PASEMI_EDAC_NR_CHANS			1
+#define PASEMI_EDAC_ERROR_GRAIN			64
+
+static int last_page_in_mmc;
+static int system_mmc_id;
+
+
+static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
+{
+	struct pci_dev *pdev = to_pci_dev(mci->dev);
+	u32 tmp;
+
+	pci_read_config_dword(pdev, MCDEBUG_ERRSTA,
+			      &tmp);
+
+	tmp &= (MCDEBUG_ERRSTA_RFL_STATUS | MCDEBUG_ERRSTA_MBE_STATUS
+		| MCDEBUG_ERRSTA_SBE_STATUS);
+
+	if (tmp) {
+		if (tmp & MCDEBUG_ERRSTA_SBE_STATUS)
+			pci_write_config_dword(pdev, MCDEBUG_ERRCNT1,
+					       MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO);
+		pci_write_config_dword(pdev, MCDEBUG_ERRSTA, tmp);
+	}
+
+	return tmp;
+}
+
+static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
+{
+	struct pci_dev *pdev = to_pci_dev(mci->dev);
+	u32 errlog1a;
+	u32 cs;
+
+	if (!errsta)
+		return;
+
+	pci_read_config_dword(pdev, MCDEBUG_ERRLOG1A, &errlog1a);
+
+	cs = (errlog1a & MCDEBUG_ERRLOG1A_MERR_CS_M) >>
+		MCDEBUG_ERRLOG1A_MERR_CS_S;
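+
+	/*
+	 * For example, an ERRLOG1A value of 0x10020000 would decode as a
+	 * single-bit error (MERR_TYPE_SBE) on chip select 2.
+	 */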
+
+	/* uncorrectable/multi-bit errors */
+	if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
+		      MCDEBUG_ERRSTA_RFL_STATUS)) {
+		edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0,
+				  cs, mci->ctl_name);
+	}
+
+	/* correctable/single-bit errors */
+	if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) {
+		edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0,
+				  0, cs, 0, mci->ctl_name);
+	}
+}
+
+static void pasemi_edac_check(struct mem_ctl_info *mci)
+{
+	u32 errsta;
+
+	errsta = pasemi_edac_get_error_info(mci);
+	if (errsta)
+		pasemi_edac_process_error_info(mci, errsta);
+}
+
+static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
+				   struct pci_dev *pdev,
+				   enum edac_type edac_mode)
+{
+	struct csrow_info *csrow;
+	u32 rankcfg;
+	int index;
+
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+
+		pci_read_config_dword(pdev,
+				      MCDRAM_RANKCFG + (index * 12),
+				      &rankcfg);
+
+		if (!(rankcfg & MCDRAM_RANKCFG_EN))
+			continue;
+
+		switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
+			MCDRAM_RANKCFG_TYPE_SIZE_S) {
+		case 0:
+			csrow->nr_pages = 128 << (20 - PAGE_SHIFT);
+			break;
+		case 1:
+			csrow->nr_pages = 256 << (20 - PAGE_SHIFT);
+			break;
+		case 2:
+		case 3:
+			csrow->nr_pages = 512 << (20 - PAGE_SHIFT);
+			break;
+		case 4:
+			csrow->nr_pages = 1024 << (20 - PAGE_SHIFT);
+			break;
+		case 5:
+			csrow->nr_pages = 2048 << (20 - PAGE_SHIFT);
+			break;
+		default:
+			edac_mc_printk(mci, KERN_ERR,
+				"Unrecognized Rank Config. rankcfg=%u\n",
+				rankcfg);
+			return -EINVAL;
+		}
+
+		csrow->first_page = last_page_in_mmc;
+		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+		last_page_in_mmc += csrow->nr_pages;
+		csrow->page_mask = 0;
+		csrow->grain = PASEMI_EDAC_ERROR_GRAIN;
+		csrow->mtype = MEM_DDR;
+		csrow->dtype = DEV_UNKNOWN;
+		csrow->edac_mode = edac_mode;
+	}
+	return 0;
+}
+
+static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
+		const struct pci_device_id *ent)
+{
+	struct mem_ctl_info *mci = NULL;
+	u32 errctl1, errcor, scrub, mcen;
+
+	pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
+	if (!(mcen & MCCFG_MCEN_MMC_EN))
+		return -ENODEV;
+
+	/*
+	 * We should think about enabling other error detection later on
+	 */
+
+	pci_read_config_dword(pdev, MCDEBUG_ERRCTL1, &errctl1);
+	errctl1 |= MCDEBUG_ERRCTL1_SBE_LOG_EN |
+		MCDEBUG_ERRCTL1_MBE_LOG_EN |
+		MCDEBUG_ERRCTL1_RFL_LOG_EN;
+	pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
+
+	mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS,
+				system_mmc_id++);
+
+	if (mci == NULL)
+		return -ENOMEM;
+
+	pci_read_config_dword(pdev, MCCFG_ERRCOR, &errcor);
+	errcor |= MCCFG_ERRCOR_RNK_FAIL_DET_EN |
+		MCCFG_ERRCOR_ECC_GEN_EN |
+		MCCFG_ERRCOR_ECC_CRR_EN;
+
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+	mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ?
+		((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ?
+		 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_EC) :
+		EDAC_FLAG_NONE;
+	mci->mod_name = MODULE_NAME;
+	mci->dev_name = pci_name(pdev);
+	mci->ctl_name = "pasemi,pwrficient-mc";
+	mci->edac_check = pasemi_edac_check;
+	mci->ctl_page_to_phys = NULL;
+	pci_read_config_dword(pdev, MCCFG_SCRUB, &scrub);
+	mci->scrub_cap = SCRUB_FLAG_HW_PROG | SCRUB_FLAG_HW_SRC;
+	mci->scrub_mode =
+		((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ? SCRUB_FLAG_HW_SRC : 0) |
+		((scrub & MCCFG_SCRUB_RGLR_SCRB_EN) ? SCRUB_FLAG_HW_PROG : 0);
+
+	if (pasemi_edac_init_csrows(mci, pdev,
+				    (mci->edac_cap & EDAC_FLAG_SECDED) ?
+				    EDAC_SECDED :
+				    ((mci->edac_cap & EDAC_FLAG_EC) ?
+				     EDAC_EC : EDAC_NONE)))
+		goto fail;
+
+	/*
+	 * Clear status
+	 */
+	pasemi_edac_get_error_info(mci);
+
+	if (edac_mc_add_mc(mci))
+		goto fail;
+
+	/* get this far and it's successful */
+	return 0;
+
+fail:
+	edac_mc_free(mci);
+	return -ENODEV;
+}
+
+static void __devexit pasemi_edac_remove(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+
+	if (!mci)
+		return;
+
+	edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id pasemi_edac_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa00a) },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl);
+
+static struct pci_driver pasemi_edac_driver = {
+	.name = MODULE_NAME,
+	.probe = pasemi_edac_probe,
+	.remove = __devexit_p(pasemi_edac_remove),
+	.id_table = pasemi_edac_pci_tbl,
+};
+
+static int __init pasemi_edac_init(void)
+{
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	return pci_register_driver(&pasemi_edac_driver);
+}
+
+static void __exit pasemi_edac_exit(void)
+{
+	pci_unregister_driver(&pasemi_edac_driver);
+}
+
+module_init(pasemi_edac_init);
+module_exit(pasemi_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
new file mode 100644
index 0000000..11f2172
--- /dev/null
+++ b/drivers/edac/ppc4xx_edac.c
@@ -0,0 +1,1448 @@
+/*
+ * Copyright (c) 2008 Nuovation System Designs, LLC
+ *   Grant Erickson <gerickson@nuovations.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/types.h>
+
+#include <asm/dcr.h>
+
+#include "edac_core.h"
+#include "ppc4xx_edac.h"
+
+/*
+ * This file implements a driver for monitoring and handling events
+ * associated with the IBM DDR2 ECC controller found in the AMCC/IBM
+ * 405EX[r], 440SP, 440SPe, 460EX, 460GT and 460SX.
+ *
+ * As realized in the 405EX[r], this controller features:
+ *
+ *   - Support for registered- and non-registered DDR1 and DDR2 memory.
+ *   - 32-bit or 16-bit memory interface with optional ECC.
+ *
+ *     o ECC support includes:
+ *
+ *       - 4-bit SEC/DED
+ *       - Aligned-nibble error detect
+ *       - Bypass mode
+ *
+ *   - Two (2) memory banks/ranks.
+ *   - Up to 1 GiB per bank/rank in 32-bit mode and up to 512 MiB per
+ *     bank/rank in 16-bit mode.
+ *
+ * As realized in the 440SP and 440SPe, this controller changes/adds:
+ *
+ *   - 64-bit or 32-bit memory interface with optional ECC.
+ *
+ *     o ECC support includes:
+ *
+ *       - 8-bit SEC/DED
+ *       - Aligned-nibble error detect
+ *       - Bypass mode
+ *
+ *   - Up to 4 GiB per bank/rank in 64-bit mode and up to 2 GiB
+ *     per bank/rank in 32-bit mode.
+ *
+ * As realized in the 460EX and 460GT, this controller changes/adds:
+ *
+ *   - 64-bit or 32-bit memory interface with optional ECC.
+ *
+ *     o ECC support includes:
+ *
+ *       - 8-bit SEC/DED
+ *       - Aligned-nibble error detect
+ *       - Bypass mode
+ *
+ *   - Four (4) memory banks/ranks.
+ *   - Up to 16 GiB per bank/rank in 64-bit mode and up to 8 GiB
+ *     per bank/rank in 32-bit mode.
+ *
+ * At present, this driver has ONLY been tested against the controller
+ * realization in the 405EX[r] on the AMCC Kilauea and Haleakala
+ * boards (256 MiB w/o ECC memory soldered onto the board) and a
+ * proprietary board based on those designs (128 MiB ECC memory, also
+ * soldered onto the board).
+ *
+ * Dynamic feature detection and handling needs to be added for the
+ * other realizations of this controller listed above.
+ *
+ * Eventually, this driver will likely be adapted to the above variant
+ * realizations of this controller as well as broken apart to handle
+ * the other known ECC-capable controllers prevalent in other 4xx
+ * processors:
+ *
+ *   - IBM SDRAM (405GP, 405CR and 405EP) "ibm,sdram-4xx"
+ *   - IBM DDR1 (440GP, 440GX, 440EP and 440GR) "ibm,sdram-4xx-ddr"
+ *   - Denali DDR1/DDR2 (440EPX and 440GRX) "denali,sdram-4xx-ddr2"
+ *
+ * For this controller, unfortunately, correctable errors report
+ * nothing more than the beat/cycle and byte/lane the correction
+ * occurred on and the check bit group that covered the error.
+ *
+ * In contrast, uncorrectable errors also report the failing address,
+ * the bus master and the transaction direction (i.e. read or write)
+ *
+ * Regardless of whether the error is a CE or a UE, we report the
+ * following pieces of information in the driver-unique message to the
+ * EDAC subsystem:
+ *
+ *   - Device tree path
+ *   - Bank(s)
+ *   - Check bit error group
+ *   - Beat(s)/lane(s)
+ */
+
+/* Preprocessor Definitions */
+
+#define EDAC_OPSTATE_INT_STR		"interrupt"
+#define EDAC_OPSTATE_POLL_STR		"polled"
+#define EDAC_OPSTATE_UNKNOWN_STR	"unknown"
+
+#define PPC4XX_EDAC_MODULE_NAME		"ppc4xx_edac"
+#define PPC4XX_EDAC_MODULE_REVISION	"v1.0.0 " __DATE__
+
+#define PPC4XX_EDAC_MESSAGE_SIZE	256
+
+/*
+ * Kernel logging without an EDAC instance
+ */
+#define ppc4xx_edac_printk(level, fmt, arg...) \
+	edac_printk(level, "PPC4xx MC", fmt, ##arg)
+
+/*
+ * Kernel logging with an EDAC instance
+ */
+#define ppc4xx_edac_mc_printk(level, mci, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "PPC4xx", fmt, ##arg)
+
+/*
+ * Macros to convert bank configuration size enumerations into MiB and
+ * page values.
+ */
+#define SDRAM_MBCF_SZ_MiB_MIN		4
+#define SDRAM_MBCF_SZ_TO_MiB(n)		(SDRAM_MBCF_SZ_MiB_MIN \
+					 << (SDRAM_MBCF_SZ_DECODE(n)))
+#define SDRAM_MBCF_SZ_TO_PAGES(n)	(SDRAM_MBCF_SZ_MiB_MIN \
+					 << (20 - PAGE_SHIFT + \
+					     SDRAM_MBCF_SZ_DECODE(n)))
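+
+/*
+ * For example, assuming SDRAM_MBCF_SZ_DECODE(n) yields 6 for a given bank
+ * configuration value, SDRAM_MBCF_SZ_TO_MiB(n) is 4 << 6 = 256 MiB and,
+ * with 4 KiB pages, SDRAM_MBCF_SZ_TO_PAGES(n) is 4 << 14 = 65536 pages.
+ */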
+
+/*
+ * The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are
+ * indirectly accessed and have a base and length defined by the
+ * device tree. The base can be anything; however, we expect the
+ * length to be precisely two registers, the first for the address
+ * window and the second for the data window.
+ */
+#define SDRAM_DCR_RESOURCE_LEN		2
+#define SDRAM_DCR_ADDR_OFFSET		0
+#define SDRAM_DCR_DATA_OFFSET		1
+
+/*
+ * Device tree interrupt indices
+ */
+#define INTMAP_ECCDED_INDEX		0	/* Double-bit Error Detect */
+#define INTMAP_ECCSEC_INDEX		1	/* Single-bit Error Correct */
+
+/* Type Definitions */
+
+/*
+ * PPC4xx SDRAM memory controller private instance data
+ */
+struct ppc4xx_edac_pdata {
+	dcr_host_t dcr_host;	/* Indirect DCR address/data window mapping */
+	struct {
+		int sec;	/* Single-bit correctable error IRQ assigned */
+		int ded;	/* Double-bit detectable error IRQ assigned */
+	} irqs;
+};
+
+/*
+ * Various status data gathered and manipulated when checking and
+ * reporting ECC status.
+ */
+struct ppc4xx_ecc_status {
+	u32 ecces;
+	u32 besr;
+	u32 bearh;
+	u32 bearl;
+	u32 wmirq;
+};
+
+/* Function Prototypes */
+
+static int ppc4xx_edac_probe(struct of_device *device,
+			     const struct of_device_id *device_id);
+static int ppc4xx_edac_remove(struct of_device *device);
+
+/* Global Variables */
+
+/*
+ * Device tree node type and compatible tuples this driver can match
+ * on.
+ */
+static struct of_device_id ppc4xx_edac_match[] = {
+	{
+		.compatible	= "ibm,sdram-4xx-ddr2"
+	},
+	{ }
+};
+
+static struct of_platform_driver ppc4xx_edac_driver = {
+	.match_table		= ppc4xx_edac_match,
+	.probe			= ppc4xx_edac_probe,
+	.remove			= ppc4xx_edac_remove,
+	.driver			= {
+		.owner	= THIS_MODULE,
+		.name	= PPC4XX_EDAC_MODULE_NAME
+	}
+};
+
+/*
+ * TODO: The row and channel parameters likely need to be dynamically
+ * set based on the aforementioned variant controller realizations.
+ */
+static const unsigned ppc4xx_edac_nr_csrows = 2;
+static const unsigned ppc4xx_edac_nr_chans = 1;
+
+/*
+ * Strings associated with PLB master IDs capable of being posted in
+ * SDRAM_BESR or SDRAM_WMIRQ on uncorrectable ECC errors.
+ */
+static const char * const ppc4xx_plb_masters[9] = {
+	[SDRAM_PLB_M0ID_ICU]	= "ICU",
+	[SDRAM_PLB_M0ID_PCIE0]	= "PCI-E 0",
+	[SDRAM_PLB_M0ID_PCIE1]	= "PCI-E 1",
+	[SDRAM_PLB_M0ID_DMA]	= "DMA",
+	[SDRAM_PLB_M0ID_DCU]	= "DCU",
+	[SDRAM_PLB_M0ID_OPB]	= "OPB",
+	[SDRAM_PLB_M0ID_MAL]	= "MAL",
+	[SDRAM_PLB_M0ID_SEC]	= "SEC",
+	[SDRAM_PLB_M0ID_AHB]	= "AHB"
+};
+
+/**
+ * mfsdram - read and return controller register data
+ * @dcr_host: A pointer to the DCR mapping.
+ * @idcr_n: The indirect DCR register to read.
+ *
+ * This routine reads and returns the data associated with the
+ * controller's specified indirect DCR register.
+ *
+ * Returns the read data.
+ */
+static inline u32
+mfsdram(const dcr_host_t *dcr_host, unsigned int idcr_n)
+{
+	return __mfdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
+			dcr_host->base + SDRAM_DCR_DATA_OFFSET,
+			idcr_n);
+}
+
+/**
+ * mtsdram - write controller register data
+ * @dcr_host: A pointer to the DCR mapping.
+ * @idcr_n: The indirect DCR register to write.
+ * @value: The data to write.
+ *
+ * This routine writes the provided data to the controller's specified
+ * indirect DCR register.
+ */
+static inline void
+mtsdram(const dcr_host_t *dcr_host, unsigned int idcr_n, u32 value)
+{
+	__mtdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
+		 dcr_host->base + SDRAM_DCR_DATA_OFFSET,
+		 idcr_n,
+		 value);
+}
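+
+/*
+ * Typical usage, as in the message generators below (mcopt1 is just an
+ * illustrative local variable name):
+ *
+ *	u32 mcopt1 = mfsdram(&pdata->dcr_host, SDRAM_MCOPT1);
+ *	mtsdram(&pdata->dcr_host, SDRAM_MCOPT1, mcopt1);
+ */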
+
+/**
+ * ppc4xx_edac_check_bank_error - check a bank for an ECC bank error
+ * @status: A pointer to the ECC status structure to check for an
+ *          ECC bank error.
+ * @bank: The bank to check for an ECC error.
+ *
+ * This routine determines whether the specified bank has an ECC
+ * error.
+ *
+ * Returns true if the specified bank has an ECC error; otherwise,
+ * false.
+ */
+static bool
+ppc4xx_edac_check_bank_error(const struct ppc4xx_ecc_status *status,
+			     unsigned int bank)
+{
+	switch (bank) {
+	case 0:
+		return status->ecces & SDRAM_ECCES_BK0ER;
+	case 1:
+		return status->ecces & SDRAM_ECCES_BK1ER;
+	default:
+		return false;
+	}
+}
+
+/**
+ * ppc4xx_edac_generate_bank_message - generate interpreted bank status message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the bank message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine generates to the provided buffer the portion of the
+ * driver-unique report message associated with the ECCES[BKNER]
+ * field of the specified ECC status.
+ *
+ * Returns the number of characters generated on success; otherwise, <
+ * 0 on error.
+ */
+static int
+ppc4xx_edac_generate_bank_message(const struct mem_ctl_info *mci,
+				  const struct ppc4xx_ecc_status *status,
+				  char *buffer,
+				  size_t size)
+{
+	int n, total = 0;
+	unsigned int row, rows;
+
+	n = snprintf(buffer, size, "%s: Banks: ", mci->dev_name);
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+	for (rows = 0, row = 0; row < mci->nr_csrows; row++) {
+		if (ppc4xx_edac_check_bank_error(status, row)) {
+			n = snprintf(buffer, size, "%s%u",
+					(rows++ ? ", " : ""), row);
+
+			if (n < 0 || n >= size)
+				goto fail;
+
+			buffer += n;
+			size -= n;
+			total += n;
+		}
+	}
+
+	n = snprintf(buffer, size, "%s; ", rows ? "" : "None");
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+ fail:
+	return total;
+}
+
+/**
+ * ppc4xx_edac_generate_checkbit_message - generate interpreted checkbit message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the checkbit message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine generates to the provided buffer the portion of the
+ * driver-unique report message associated with the ECCES[CKBER]
+ * field of the specified ECC status.
+ *
+ * Returns the number of characters generated on success; otherwise, <
+ * 0 on error.
+ */
+static int
+ppc4xx_edac_generate_checkbit_message(const struct mem_ctl_info *mci,
+				      const struct ppc4xx_ecc_status *status,
+				      char *buffer,
+				      size_t size)
+{
+	const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+	const char *ckber = NULL;
+
+	switch (status->ecces & SDRAM_ECCES_CKBER_MASK) {
+	case SDRAM_ECCES_CKBER_NONE:
+		ckber = "None";
+		break;
+	case SDRAM_ECCES_CKBER_32_ECC_0_3:
+		ckber = "ECC0:3";
+		break;
+	case SDRAM_ECCES_CKBER_32_ECC_4_8:
+		switch (mfsdram(&pdata->dcr_host, SDRAM_MCOPT1) &
+			SDRAM_MCOPT1_WDTH_MASK) {
+		case SDRAM_MCOPT1_WDTH_16:
+			ckber = "ECC0:3";
+			break;
+		case SDRAM_MCOPT1_WDTH_32:
+			ckber = "ECC4:8";
+			break;
+		default:
+			ckber = "Unknown";
+			break;
+		}
+		break;
+	case SDRAM_ECCES_CKBER_32_ECC_0_8:
+		ckber = "ECC0:8";
+		break;
+	default:
+		ckber = "Unknown";
+		break;
+	}
+
+	return snprintf(buffer, size, "Checkbit Error: %s", ckber);
+}
+
+/**
+ * ppc4xx_edac_generate_lane_message - generate interpreted byte lane message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the byte lane message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine generates to the provided buffer the portion of the
+ * driver-unique report message associated with the ECCES[BNCE]
+ * field of the specified ECC status.
+ *
+ * Returns the number of characters generated on success; otherwise, <
+ * 0 on error.
+ */
+static int
+ppc4xx_edac_generate_lane_message(const struct mem_ctl_info *mci,
+				  const struct ppc4xx_ecc_status *status,
+				  char *buffer,
+				  size_t size)
+{
+	int n, total = 0;
+	unsigned int lane, lanes;
+	const unsigned int first_lane = 0;
+	const unsigned int lane_count = 16;
+
+	n = snprintf(buffer, size, "; Byte Lane Errors: ");
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+	for (lanes = 0, lane = first_lane; lane < lane_count; lane++) {
+		if ((status->ecces & SDRAM_ECCES_BNCE_ENCODE(lane)) != 0) {
+			n = snprintf(buffer, size,
+				     "%s%u",
+				     (lanes++ ? ", " : ""), lane);
+
+			if (n < 0 || n >= size)
+				goto fail;
+
+			buffer += n;
+			size -= n;
+			total += n;
+		}
+	}
+
+	n = snprintf(buffer, size, "%s; ", lanes ? "" : "None");
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+ fail:
+	return total;
+}
+
+/**
+ * ppc4xx_edac_generate_ecc_message - generate interpreted ECC status message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the ECCES message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine generates to the provided buffer the portion of the
+ * driver-unique report message associated with the ECCES register of
+ * the specified ECC status.
+ *
+ * Returns the number of characters generated on success; otherwise, <
+ * 0 on error.
+ */
+static int
+ppc4xx_edac_generate_ecc_message(const struct mem_ctl_info *mci,
+				 const struct ppc4xx_ecc_status *status,
+				 char *buffer,
+				 size_t size)
+{
+	int n, total = 0;
+
+	n = ppc4xx_edac_generate_bank_message(mci, status, buffer, size);
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+	n = ppc4xx_edac_generate_checkbit_message(mci, status, buffer, size);
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+	n = ppc4xx_edac_generate_lane_message(mci, status, buffer, size);
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+ fail:
+	return total;
+}
+
+/**
+ * ppc4xx_edac_generate_plb_message - generate interpreted PLB status message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the PLB message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine generates to the provided buffer the portion of the
+ * driver-unique report message associated with the PLB-related BESR
+ * and/or WMIRQ registers of the specified ECC status.
+ *
+ * Returns the number of characters generated on success; otherwise, <
+ * 0 on error.
+ */
+static int
+ppc4xx_edac_generate_plb_message(const struct mem_ctl_info *mci,
+				 const struct ppc4xx_ecc_status *status,
+				 char *buffer,
+				 size_t size)
+{
+	unsigned int master;
+	bool read;
+
+	if ((status->besr & SDRAM_BESR_MASK) == 0)
+		return 0;
+
+	if ((status->besr & SDRAM_BESR_M0ET_MASK) == SDRAM_BESR_M0ET_NONE)
+		return 0;
+
+	read = ((status->besr & SDRAM_BESR_M0RW_MASK) == SDRAM_BESR_M0RW_READ);
+
+	master = SDRAM_BESR_M0ID_DECODE(status->besr);
+
+	return snprintf(buffer, size,
+			"%s error w/ PLB master %u \"%s\"; ",
+			(read ? "Read" : "Write"),
+			master,
+			(((master >= SDRAM_PLB_M0ID_FIRST) &&
+			  (master <= SDRAM_PLB_M0ID_LAST)) ?
+			 ppc4xx_plb_masters[master] : "UNKNOWN"));
+}
+
+/**
+ * ppc4xx_edac_generate_message - generate interpreted status message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the driver-unique message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine generates to the provided buffer the driver-unique
+ * EDAC report message from the specified ECC status.
+ */
+static void
+ppc4xx_edac_generate_message(const struct mem_ctl_info *mci,
+			     const struct ppc4xx_ecc_status *status,
+			     char *buffer,
+			     size_t size)
+{
+	int n;
+
+	if (buffer == NULL || size == 0)
+		return;
+
+	n = ppc4xx_edac_generate_ecc_message(mci, status, buffer, size);
+
+	if (n < 0 || n >= size)
+		return;
+
+	buffer += n;
+	size -= n;
+
+	ppc4xx_edac_generate_plb_message(mci, status, buffer, size);
+}
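+
+/*
+ * For illustration, a fully generated message might read:
+ *
+ *   <dev>: Banks: 0; Checkbit Error: None; Byte Lane Errors: 3;
+ *   Read error w/ PLB master 4 "DCU";
+ *
+ * where <dev> is the device tree full name assigned to mci->dev_name.
+ */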
+
+#ifdef DEBUG
+/**
+ * ppc4xx_ecc_dump_status - dump controller ECC status registers
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the status being dumped.
+ * @status: A pointer to the ECC status structure to generate the
+ *          dump from.
+ *
+ * This routine dumps the raw and interpreted forms of the specified
+ * ECC status to the kernel log buffer.
+ */
+static void
+ppc4xx_ecc_dump_status(const struct mem_ctl_info *mci,
+		       const struct ppc4xx_ecc_status *status)
+{
+	char message[PPC4XX_EDAC_MESSAGE_SIZE];
+
+	ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
+
+	ppc4xx_edac_mc_printk(KERN_INFO, mci,
+			      "\n"
+			      "\tECCES: 0x%08x\n"
+			      "\tWMIRQ: 0x%08x\n"
+			      "\tBESR:  0x%08x\n"
+			      "\tBEAR:  0x%08x%08x\n"
+			      "\t%s\n",
+			      status->ecces,
+			      status->wmirq,
+			      status->besr,
+			      status->bearh,
+			      status->bearl,
+			      message);
+}
+#endif /* DEBUG */
+
+/**
+ * ppc4xx_ecc_get_status - get controller ECC status
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the status being retrieved.
+ * @status: A pointer to the ECC status structure to populate the
+ *          ECC status with.
+ *
+ * This routine reads and masks, as appropriate, all the relevant
+ * status registers that deal with ibm,sdram-4xx-ddr2 ECC errors.
+ * While we read all of them, for correctable errors, we only expect
+ * to deal with ECCES. For uncorrectable errors, we expect to deal
+ * with all of them.
+ */
+static void
+ppc4xx_ecc_get_status(const struct mem_ctl_info *mci,
+		      struct ppc4xx_ecc_status *status)
+{
+	const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+	const dcr_host_t *dcr_host = &pdata->dcr_host;
+
+	status->ecces = mfsdram(dcr_host, SDRAM_ECCES) & SDRAM_ECCES_MASK;
+	status->wmirq = mfsdram(dcr_host, SDRAM_WMIRQ) & SDRAM_WMIRQ_MASK;
+	status->besr  = mfsdram(dcr_host, SDRAM_BESR)  & SDRAM_BESR_MASK;
+	status->bearl = mfsdram(dcr_host, SDRAM_BEARL);
+	status->bearh = mfsdram(dcr_host, SDRAM_BEARH);
+}
+
+/**
+ * ppc4xx_ecc_clear_status - clear controller ECC status
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the status being cleared.
+ * @status: A pointer to the ECC status structure containing the
+ *          values to write to clear the ECC status.
+ *
+ * This routine clears the status registers that deal with
+ * ibm,sdram-4xx-ddr2 ECC errors by writing the masked (as
+ * appropriate) status values back to them.
+ */
+static void
+ppc4xx_ecc_clear_status(const struct mem_ctl_info *mci,
+			const struct ppc4xx_ecc_status *status)
+{
+	const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+	const dcr_host_t *dcr_host = &pdata->dcr_host;
+
+	mtsdram(dcr_host, SDRAM_ECCES,	status->ecces & SDRAM_ECCES_MASK);
+	mtsdram(dcr_host, SDRAM_WMIRQ,	status->wmirq & SDRAM_WMIRQ_MASK);
+	mtsdram(dcr_host, SDRAM_BESR,	status->besr & SDRAM_BESR_MASK);
+	mtsdram(dcr_host, SDRAM_BEARL,	0);
+	mtsdram(dcr_host, SDRAM_BEARH,	0);
+}
+
+/**
+ * ppc4xx_edac_handle_ce - handle controller correctable ECC error (CE)
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the correctable error being handled and reported.
+ * @status: A pointer to the ECC status structure associated with
+ *          the correctable error being handled and reported.
+ *
+ * This routine handles an ibm,sdram-4xx-ddr2 controller ECC
+ * correctable error. Per the aforementioned discussion, there's not
+ * enough status available to use the full EDAC correctable error
+ * interface, so we just pass a driver-unique message to the "no info"
+ * interface.
+ */
+static void
+ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
+		      const struct ppc4xx_ecc_status *status)
+{
+	int row;
+	char message[PPC4XX_EDAC_MESSAGE_SIZE];
+
+	ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
+
+	for (row = 0; row < mci->nr_csrows; row++)
+		if (ppc4xx_edac_check_bank_error(status, row))
+			edac_mc_handle_ce_no_info(mci, message);
+}
+
+/**
+ * ppc4xx_edac_handle_ue - handle controller uncorrectable ECC error (UE)
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the uncorrectable error being handled and
+ *       reported.
+ * @status: A pointer to the ECC status structure associated with
+ *          the uncorrectable error being handled and reported.
+ *
+ * This routine handles an ibm,sdram-4xx-ddr2 controller ECC
+ * uncorrectable error.
+ */
+static void
+ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
+		      const struct ppc4xx_ecc_status *status)
+{
+	const u64 bear = ((u64)status->bearh << 32 | status->bearl);
+	const unsigned long page = bear >> PAGE_SHIFT;
+	const unsigned long offset = bear & ~PAGE_MASK;
+	int row;
+	char message[PPC4XX_EDAC_MESSAGE_SIZE];
+
+	ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
+
+	for (row = 0; row < mci->nr_csrows; row++)
+		if (ppc4xx_edac_check_bank_error(status, row))
+			edac_mc_handle_ue(mci, page, offset, row, message);
+}
+
+/**
+ * ppc4xx_edac_check - check controller for ECC errors
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the ibm,sdram-4xx-ddr2 controller being
+ *       checked.
+ *
+ * This routine is used to check and post ECC errors and is called by
+ * both the EDAC polling thread and this driver's CE and UE interrupt
+ * handler.
+ */
+static void
+ppc4xx_edac_check(struct mem_ctl_info *mci)
+{
+#ifdef DEBUG
+	static unsigned int count;
+#endif
+	struct ppc4xx_ecc_status status;
+
+	ppc4xx_ecc_get_status(mci, &status);
+
+#ifdef DEBUG
+	if (count++ % 30 == 0)
+		ppc4xx_ecc_dump_status(mci, &status);
+#endif
+
+	if (status.ecces & SDRAM_ECCES_UE)
+		ppc4xx_edac_handle_ue(mci, &status);
+
+	if (status.ecces & SDRAM_ECCES_CE)
+		ppc4xx_edac_handle_ce(mci, &status);
+
+	ppc4xx_ecc_clear_status(mci, &status);
+}
+
+/**
+ * ppc4xx_edac_isr - SEC (CE) and DED (UE) interrupt service routine
+ * @irq:    The virtual interrupt number being serviced.
+ * @dev_id: A pointer to the EDAC memory controller instance
+ *          associated with the interrupt being handled.
+ *
+ * This routine implements the interrupt handler for both correctable
+ * (CE) and uncorrectable (UE) ECC errors for the ibm,sdram-4xx-ddr2
+ * controller. It simply calls through to the same routine used during
+ * polling to check, report and clear the ECC status.
+ *
+ * Unconditionally returns IRQ_HANDLED.
+ */
+static irqreturn_t
+ppc4xx_edac_isr(int irq, void *dev_id)
+{
+	struct mem_ctl_info *mci = dev_id;
+
+	ppc4xx_edac_check(mci);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ppc4xx_edac_get_dtype - return the controller memory width
+ * @mcopt1: The 32-bit Memory Controller Option 1 register value
+ *          currently set for the controller, from which the width
+ *          is derived.
+ *
+ * This routine returns the EDAC device type width appropriate for the
+ * current controller configuration.
+ *
+ * TODO: This needs to be conditioned dynamically through feature
+ * flags or some such when other controller variants are supported,
+ * as the 405EX[r] is 16-/32-bit while the others are 32-/64-bit,
+ * with the 16- and 64-bit field definition/value/enumeration (b1)
+ * overloaded among them.
+ *
+ * Returns a device type width enumeration.
+ */
+static enum dev_type __devinit
+ppc4xx_edac_get_dtype(u32 mcopt1)
+{
+	switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) {
+	case SDRAM_MCOPT1_WDTH_16:
+		return DEV_X2;
+	case SDRAM_MCOPT1_WDTH_32:
+		return DEV_X4;
+	default:
+		return DEV_UNKNOWN;
+	}
+}
+
+/**
+ * ppc4xx_edac_get_mtype - return controller memory type
+ * @mcopt1: The 32-bit Memory Controller Option 1 register value
+ *          currently set for the controller, from which the memory type
+ *          is derived.
+ *
+ * This routine returns the EDAC memory type appropriate for the
+ * current controller configuration.
+ *
+ * Returns a memory type enumeration.
+ */
+static enum mem_type __devinit
+ppc4xx_edac_get_mtype(u32 mcopt1)
+{
+	bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN);
+
+	switch (mcopt1 & SDRAM_MCOPT1_DDR_TYPE_MASK) {
+	case SDRAM_MCOPT1_DDR2_TYPE:
+		return rden ? MEM_RDDR2 : MEM_DDR2;
+	case SDRAM_MCOPT1_DDR1_TYPE:
+		return rden ? MEM_RDDR : MEM_DDR;
+	default:
+		return MEM_UNKNOWN;
+	}
+}
+
+/**
+ * ppc4xx_edac_init_csrows - initialize driver instance rows
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the ibm,sdram-4xx-ddr2 controller for which
+ *       the csrows (i.e. banks/ranks) are being initialized.
+ * @mcopt1: The 32-bit Memory Controller Option 1 register value
+ *          currently set for the controller, from which bank width
+ *          and memory type information is derived.
+ *
+ * This routine initializes the virtual "chip select rows" associated
+ * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2
+ * controller bank/rank is mapped to a row.
+ *
+ * Returns 0 if OK; otherwise, -EINVAL if the memory bank size
+ * configuration cannot be determined.
+ */
+static int __devinit
+ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
+{
+	const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+	int status = 0;
+	enum mem_type mtype;
+	enum dev_type dtype;
+	enum edac_type edac_mode;
+	int row;
+	u32 mbxcf, size;
+	static u32 ppc4xx_last_page;
+
+	/* Establish the memory type and width */
+
+	mtype = ppc4xx_edac_get_mtype(mcopt1);
+	dtype = ppc4xx_edac_get_dtype(mcopt1);
+
+	/* Establish EDAC mode */
+
+	if (mci->edac_cap & EDAC_FLAG_SECDED)
+		edac_mode = EDAC_SECDED;
+	else if (mci->edac_cap & EDAC_FLAG_EC)
+		edac_mode = EDAC_EC;
+	else
+		edac_mode = EDAC_NONE;
+
+	/*
+	 * Initialize each chip select row structure, each of which
+	 * corresponds 1:1 with a controller bank/rank.
+	 */
+
+	for (row = 0; row < mci->nr_csrows; row++) {
+		struct csrow_info *csi = &mci->csrows[row];
+
+		/*
+		 * Get the configuration settings for this
+		 * row/bank/rank and skip disabled banks.
+		 */
+
+		mbxcf = mfsdram(&pdata->dcr_host, SDRAM_MBXCF(row));
+
+		if ((mbxcf & SDRAM_MBCF_BE_MASK) != SDRAM_MBCF_BE_ENABLE)
+			continue;
+
+		/* Map the bank configuration size setting to pages. */
+
+		size = mbxcf & SDRAM_MBCF_SZ_MASK;
+
+		switch (size) {
+		case SDRAM_MBCF_SZ_4MB:
+		case SDRAM_MBCF_SZ_8MB:
+		case SDRAM_MBCF_SZ_16MB:
+		case SDRAM_MBCF_SZ_32MB:
+		case SDRAM_MBCF_SZ_64MB:
+		case SDRAM_MBCF_SZ_128MB:
+		case SDRAM_MBCF_SZ_256MB:
+		case SDRAM_MBCF_SZ_512MB:
+		case SDRAM_MBCF_SZ_1GB:
+		case SDRAM_MBCF_SZ_2GB:
+		case SDRAM_MBCF_SZ_4GB:
+		case SDRAM_MBCF_SZ_8GB:
+			csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
+			break;
+		default:
+			ppc4xx_edac_mc_printk(KERN_ERR, mci,
+					      "Unrecognized memory bank %d "
+					      "size 0x%08x\n",
+					      row, SDRAM_MBCF_SZ_DECODE(size));
+			status = -EINVAL;
+			goto done;
+		}
+
+		csi->first_page = ppc4xx_last_page;
+		csi->last_page	= csi->first_page + csi->nr_pages - 1;
+		csi->page_mask	= 0;
+
+		/*
+		 * It's unclear exactly what grain should be set to
+		 * here. The SDRAM_ECCES register allows resolution of
+		 * an error down to a nibble which would potentially
+		 * argue for a grain of '1' byte, even though we only
+		 * know the associated address for uncorrectable
+		 * errors. This value is not used at present for
+		 * anything other than error reporting so getting it
+		 * wrong should be of little consequence. Other
+		 * possible values would be the PLB width (16), the
+		 * page size (PAGE_SIZE) or the memory width (2 or 4).
+		 */
+
+		csi->grain	= 1;
+
+		csi->mtype	= mtype;
+		csi->dtype	= dtype;
+
+		csi->edac_mode	= edac_mode;
+
+		ppc4xx_last_page += csi->nr_pages;
+	}
+
+ done:
+	return status;
+}
+
+/**
+ * ppc4xx_edac_mc_init - initialize driver instance
+ * @mci: A pointer to the EDAC memory controller instance being
+ *       initialized.
+ * @op: A pointer to the OpenFirmware device tree node associated
+ *      with the controller this EDAC instance is bound to.
+ * @match: A pointer to the OpenFirmware device tree match
+ *         information associated with the controller this EDAC instance
+ *         is bound to.
+ * @dcr_host: A pointer to the DCR data containing the DCR mapping
+ *            for this controller instance.
+ * @mcopt1: The 32-bit Memory Controller Option 1 register value
+ *          currently set for the controller, from which ECC capabilities
+ *          and scrub mode are derived.
+ *
+ * This routine performs initialization of the EDAC memory controller
+ * instance and related driver-private data associated with the
+ * ibm,sdram-4xx-ddr2 memory controller the instance is bound to.
+ *
+ * Returns 0 if OK; otherwise, < 0 on error.
+ */
+static int __devinit
+ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
+		    struct of_device *op,
+		    const struct of_device_id *match,
+		    const dcr_host_t *dcr_host,
+		    u32 mcopt1)
+{
+	int status = 0;
+	const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
+	struct ppc4xx_edac_pdata *pdata = NULL;
+	const struct device_node *np = op->node;
+
+	if (match == NULL)
+		return -EINVAL;
+
+	/* Initial driver pointers and private data */
+
+	mci->dev		= &op->dev;
+
+	dev_set_drvdata(mci->dev, mci);
+
+	pdata			= mci->pvt_info;
+
+	pdata->dcr_host		= *dcr_host;
+	pdata->irqs.sec		= NO_IRQ;
+	pdata->irqs.ded		= NO_IRQ;
+
+	/* Initialize controller capabilities and configuration */
+
+	mci->mtype_cap		= (MEM_FLAG_DDR | MEM_FLAG_RDDR |
+				   MEM_FLAG_DDR2 | MEM_FLAG_RDDR2);
+
+	mci->edac_ctl_cap	= (EDAC_FLAG_NONE |
+				   EDAC_FLAG_EC |
+				   EDAC_FLAG_SECDED);
+
+	mci->scrub_cap		= SCRUB_NONE;
+	mci->scrub_mode		= SCRUB_NONE;
+
+	/*
+	 * Update the actual capabilities based on the MCOPT1[MCHK]
+	 * settings. Scrubbing is only useful if reporting is enabled.
+	 */
+
+	switch (memcheck) {
+	case SDRAM_MCOPT1_MCHK_CHK:
+		mci->edac_cap	= EDAC_FLAG_EC;
+		break;
+	case SDRAM_MCOPT1_MCHK_CHK_REP:
+		mci->edac_cap	= (EDAC_FLAG_EC | EDAC_FLAG_SECDED);
+		mci->scrub_mode	= SCRUB_SW_SRC;
+		break;
+	default:
+		mci->edac_cap	= EDAC_FLAG_NONE;
+		break;
+	}
+
+	/* Initialize strings */
+
+	mci->mod_name		= PPC4XX_EDAC_MODULE_NAME;
+	mci->mod_ver		= PPC4XX_EDAC_MODULE_REVISION;
+	mci->ctl_name		= match->compatible;
+	mci->dev_name		= np->full_name;
+
+	/* Initialize callbacks */
+
+	mci->edac_check		= ppc4xx_edac_check;
+	mci->ctl_page_to_phys	= NULL;
+
+	/* Initialize chip select rows */
+
+	status = ppc4xx_edac_init_csrows(mci, mcopt1);
+
+	if (status)
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Failed to initialize rows!\n");
+
+	return status;
+}
+
+/**
+ * ppc4xx_edac_register_irq - setup and register controller interrupts
+ * @op: A pointer to the OpenFirmware device tree node associated
+ *      with the controller this EDAC instance is bound to.
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the ibm,sdram-4xx-ddr2 controller for which
+ *       interrupts are being registered.
+ *
+ * This routine parses the correctable (CE) and uncorrectable error (UE)
+ * interrupts from the device tree node and maps and assigns them to
+ * the associated EDAC memory controller instance.
+ *
+ * Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be
+ * mapped and assigned.
+ */
+static int __devinit
+ppc4xx_edac_register_irq(struct of_device *op, struct mem_ctl_info *mci)
+{
+	int status = 0;
+	int ded_irq, sec_irq;
+	struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+	struct device_node *np = op->node;
+
+	ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX);
+	sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX);
+
+	if (ded_irq == NO_IRQ || sec_irq == NO_IRQ) {
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Unable to map interrupts.\n");
+		status = -ENODEV;
+		goto fail;
+	}
+
+	status = request_irq(ded_irq,
+			     ppc4xx_edac_isr,
+			     IRQF_DISABLED,
+			     "[EDAC] MC ECCDED",
+			     mci);
+
+	if (status < 0) {
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Unable to request irq %d for ECC DED",
+				      ded_irq);
+		status = -ENODEV;
+		goto fail1;
+	}
+
+	status = request_irq(sec_irq,
+			     ppc4xx_edac_isr,
+			     IRQF_DISABLED,
+			     "[EDAC] MC ECCSEC",
+			     mci);
+
+	if (status < 0) {
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Unable to request irq %d for ECC SEC",
+				      sec_irq);
+		status = -ENODEV;
+		goto fail2;
+	}
+
+	ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCDED irq is %d\n", ded_irq);
+	ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCSEC irq is %d\n", sec_irq);
+
+	pdata->irqs.ded = ded_irq;
+	pdata->irqs.sec = sec_irq;
+
+	return 0;
+
+ fail2:
+	free_irq(sec_irq, mci);
+
+ fail1:
+	free_irq(ded_irq, mci);
+
+ fail:
+	return status;
+}
+
+/**
+ * ppc4xx_edac_map_dcrs - locate and map controller registers
+ * @np: A pointer to the device tree node containing the DCR
+ *      resources to map.
+ * @dcr_host: A pointer to the DCR data to populate with the
+ *            DCR mapping.
+ *
+ * This routine attempts to locate in the device tree and map the DCR
+ * register resources associated with the controller's indirect DCR
+ * address and data windows.
+ *
+ * Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on
+ * error.
+ */
+static int __devinit
+ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
+{
+	unsigned int dcr_base, dcr_len;
+
+	if (np == NULL || dcr_host == NULL)
+		return -EINVAL;
+
+	/* Get the DCR resource extent and sanity check the values. */
+
+	dcr_base = dcr_resource_start(np, 0);
+	dcr_len = dcr_resource_len(np, 0);
+
+	if (dcr_base == 0 || dcr_len == 0) {
+		ppc4xx_edac_printk(KERN_ERR,
+				   "Failed to obtain DCR property.\n");
+		return -ENODEV;
+	}
+
+	if (dcr_len != SDRAM_DCR_RESOURCE_LEN) {
+		ppc4xx_edac_printk(KERN_ERR,
+				   "Unexpected DCR length %d, expected %d.\n",
+				   dcr_len, SDRAM_DCR_RESOURCE_LEN);
+		return -ENODEV;
+	}
+
+	/* Attempt to map the DCR extent. */
+
+	*dcr_host = dcr_map(np, dcr_base, dcr_len);
+
+	if (!DCR_MAP_OK(*dcr_host)) {
+		ppc4xx_edac_printk(KERN_ERR, "Failed to map DCRs.\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * ppc4xx_edac_probe - check controller and bind driver
+ * @op: A pointer to the OpenFirmware device tree node associated
+ *      with the controller being probed for driver binding.
+ * @match: A pointer to the OpenFirmware device tree match
+ *         information associated with the controller being probed
+ *         for driver binding.
+ *
+ * This routine probes a specific ibm,sdram-4xx-ddr2 controller
+ * instance for binding with the driver.
+ *
+ * Returns 0 if the controller instance was successfully bound to the
+ * driver; otherwise, < 0 on error.
+ */
+static int __devinit
+ppc4xx_edac_probe(struct of_device *op, const struct of_device_id *match)
+{
+	int status = 0;
+	u32 mcopt1, memcheck;
+	dcr_host_t dcr_host;
+	const struct device_node *np = op->node;
+	struct mem_ctl_info *mci = NULL;
+	static int ppc4xx_edac_instance;
+
+	/*
+	 * At this point, we only support the controller realized on
+	 * the AMCC PPC 405EX[r]. Reject anything else.
+	 */
+
+	if (!of_device_is_compatible(np, "ibm,sdram-405ex") &&
+	    !of_device_is_compatible(np, "ibm,sdram-405exr")) {
+		ppc4xx_edac_printk(KERN_NOTICE,
+				   "Only the PPC405EX[r] is supported.\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Next, get the DCR property and attempt to map it so that we
+	 * can probe the controller.
+	 */
+
+	status = ppc4xx_edac_map_dcrs(np, &dcr_host);
+
+	if (status)
+		return status;
+
+	/*
+	 * First determine whether ECC is enabled at all. If not,
+	 * there is no useful checking or monitoring that can be done
+	 * for this controller.
+	 */
+
+	mcopt1 = mfsdram(&dcr_host, SDRAM_MCOPT1);
+	memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
+
+	if (memcheck == SDRAM_MCOPT1_MCHK_NON) {
+		ppc4xx_edac_printk(KERN_INFO, "%s: No ECC memory detected or "
+				   "ECC is disabled.\n", np->full_name);
+		status = -ENODEV;
+		goto done;
+	}
+
+	/*
+	 * At this point, we know ECC is enabled, allocate an EDAC
+	 * controller instance and perform the appropriate
+	 * initialization.
+	 */
+
+	mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata),
+			    ppc4xx_edac_nr_csrows,
+			    ppc4xx_edac_nr_chans,
+			    ppc4xx_edac_instance);
+
+	if (mci == NULL) {
+		ppc4xx_edac_printk(KERN_ERR, "%s: "
+				   "Failed to allocate EDAC MC instance!\n",
+				   np->full_name);
+		status = -ENOMEM;
+		goto done;
+	}
+
+	status = ppc4xx_edac_mc_init(mci, op, match, &dcr_host, mcopt1);
+
+	if (status) {
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Failed to initialize instance!\n");
+		goto fail;
+	}
+
+	/*
+	 * We have a valid, initialized EDAC instance bound to the
+	 * controller. Attempt to register it with the EDAC subsystem
+	 * and, if necessary, register interrupts.
+	 */
+
+	if (edac_mc_add_mc(mci)) {
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Failed to add instance!\n");
+		status = -ENODEV;
+		goto fail;
+	}
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		status = ppc4xx_edac_register_irq(op, mci);
+
+		if (status)
+			goto fail1;
+	}
+
+	ppc4xx_edac_instance++;
+
+	return 0;
+
+ fail1:
+	edac_mc_del_mc(mci->dev);
+
+ fail:
+	edac_mc_free(mci);
+
+ done:
+	return status;
+}
+
+/**
+ * ppc4xx_edac_remove - unbind driver from controller
+ * @op: A pointer to the OpenFirmware device tree node associated
+ *      with the controller this EDAC instance is to be unbound/removed
+ *      from.
+ *
+ * This routine unbinds the EDAC memory controller instance associated
+ * with the specified ibm,sdram-4xx-ddr2 controller described by the
+ * OpenFirmware device tree node passed as a parameter.
+ *
+ * Unconditionally returns 0.
+ */
+static int
+ppc4xx_edac_remove(struct of_device *op)
+{
+	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
+	struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		free_irq(pdata->irqs.sec, mci);
+		free_irq(pdata->irqs.ded, mci);
+	}
+
+	dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN);
+
+	edac_mc_del_mc(mci->dev);
+	edac_mc_free(mci);
+
+	return 0;
+}
+
+/**
+ * ppc4xx_edac_opstate_init - initialize EDAC reporting method
+ *
+ * This routine ensures that the EDAC memory controller reporting
+ * method is mapped to a sane value, as the EDAC core initializes the
+ * value to EDAC_OPSTATE_INVAL by default. We don't call the global
+ * opstate_init as that defaults to polling and we want interrupt as
+ * the default.
+ */
+static inline void __init
+ppc4xx_edac_opstate_init(void)
+{
+	switch (edac_op_state) {
+	case EDAC_OPSTATE_POLL:
+	case EDAC_OPSTATE_INT:
+		break;
+	default:
+		edac_op_state = EDAC_OPSTATE_INT;
+		break;
+	}
+
+	ppc4xx_edac_printk(KERN_INFO, "Reporting type: %s\n",
+			   ((edac_op_state == EDAC_OPSTATE_POLL) ?
+			    EDAC_OPSTATE_POLL_STR :
+			    ((edac_op_state == EDAC_OPSTATE_INT) ?
+			     EDAC_OPSTATE_INT_STR :
+			     EDAC_OPSTATE_UNKNOWN_STR)));
+}
+
+/**
+ * ppc4xx_edac_init - driver/module insertion entry point
+ *
+ * This routine is the driver/module insertion entry point. It
+ * initializes the EDAC memory controller reporting state and
+ * registers the driver as an OpenFirmware device tree platform
+ * driver.
+ */
+static int __init
+ppc4xx_edac_init(void)
+{
+	ppc4xx_edac_printk(KERN_INFO, PPC4XX_EDAC_MODULE_REVISION "\n");
+
+	ppc4xx_edac_opstate_init();
+
+	return of_register_platform_driver(&ppc4xx_edac_driver);
+}
+
+/**
+ * ppc4xx_edac_exit - driver/module removal entry point
+ *
+ * This routine is the driver/module removal entry point. It
+ * unregisters the driver as an OpenFirmware device tree platform
+ * driver.
+ */
+static void __exit
+ppc4xx_edac_exit(void)
+{
+	of_unregister_platform_driver(&ppc4xx_edac_driver);
+}
+
+module_init(ppc4xx_edac_init);
+module_exit(ppc4xx_edac_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grant Erickson <gerickson@nuovations.com>");
+MODULE_DESCRIPTION("EDAC MC Driver for the PPC4xx IBM DDR2 Memory Controller");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting State: "
+		 "0=" EDAC_OPSTATE_POLL_STR ", 2=" EDAC_OPSTATE_INT_STR);
diff --git a/drivers/edac/ppc4xx_edac.h b/drivers/edac/ppc4xx_edac.h
new file mode 100644
index 0000000..d315476
--- /dev/null
+++ b/drivers/edac/ppc4xx_edac.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2008 Nuovation System Designs, LLC
+ *   Grant Erickson <gerickson@nuovations.com>
+ *
+ * This file defines processor mnemonics for accessing and managing
+ * the IBM DDR1/DDR2 ECC controller found in the 405EX[r], 440SP,
+ * 440SPe, 460EX, 460GT and 460SX.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#ifndef __PPC4XX_EDAC_H
+#define __PPC4XX_EDAC_H
+
+#include <linux/types.h>
+
+/*
+ * Macro for generating register field mnemonics
+ */
+#define PPC_REG_BITS			32
+#define PPC_REG_VAL(bit, val)		((val) << ((PPC_REG_BITS - 1) - (bit)))
+#define PPC_REG_DECODE(bit, val)	((val) >> ((PPC_REG_BITS - 1) - (bit)))
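+
+/*
+ * These mnemonics follow the PowerPC convention in which bit 0 is the
+ * most-significant bit of a 32-bit register, so PPC_REG_VAL(bit, val)
+ * shifts val up by (31 - bit). For example, PPC_REG_VAL(3, 0xF) is
+ * 0xF << 28 == 0xF0000000 and PPC_REG_VAL(31, 0x1) == 0x00000001.
+ */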
+
+/*
+ * IBM 4xx DDR1/DDR2 SDRAM memory controller registers (at least those
+ * relevant to ECC)
+ */
+#define SDRAM_BESR			0x00	/* Error status (read/clear) */
+#define SDRAM_BESRT			0x01	/* Error status (test/set)   */
+#define SDRAM_BEARL			0x02	/* Error address low	     */
+#define SDRAM_BEARH			0x03	/* Error address high	     */
+#define SDRAM_WMIRQ			0x06	/* Write master (read/clear) */
+#define SDRAM_WMIRQT			0x07	/* Write master (test/set)   */
+#define SDRAM_MCOPT1			0x20	/* Controller options 1	     */
+#define SDRAM_MBXCF_BASE		0x40	/* Bank n configuration base */
+#define	SDRAM_MBXCF(n)			(SDRAM_MBXCF_BASE + (4 * (n)))
+#define SDRAM_MB0CF			SDRAM_MBXCF(0)
+#define SDRAM_MB1CF			SDRAM_MBXCF(1)
+#define SDRAM_MB2CF			SDRAM_MBXCF(2)
+#define SDRAM_MB3CF			SDRAM_MBXCF(3)
+#define SDRAM_ECCCR			0x98	/* ECC error status	     */
+#define SDRAM_ECCES			SDRAM_ECCCR
+
+/*
+ * PLB Master IDs
+ */
+#define	SDRAM_PLB_M0ID_FIRST		0
+#define	SDRAM_PLB_M0ID_ICU		SDRAM_PLB_M0ID_FIRST
+#define	SDRAM_PLB_M0ID_PCIE0		1
+#define	SDRAM_PLB_M0ID_PCIE1		2
+#define	SDRAM_PLB_M0ID_DMA		3
+#define	SDRAM_PLB_M0ID_DCU		4
+#define	SDRAM_PLB_M0ID_OPB		5
+#define	SDRAM_PLB_M0ID_MAL		6
+#define	SDRAM_PLB_M0ID_SEC		7
+#define	SDRAM_PLB_M0ID_AHB		8
+#define SDRAM_PLB_M0ID_LAST		SDRAM_PLB_M0ID_AHB
+#define SDRAM_PLB_M0ID_COUNT		(SDRAM_PLB_M0ID_LAST - \
+					 SDRAM_PLB_M0ID_FIRST + 1)
+
+/*
+ * Memory Controller Bus Error Status Register
+ */
+#define SDRAM_BESR_MASK			PPC_REG_VAL(7, 0xFF)
+#define SDRAM_BESR_M0ID_MASK		PPC_REG_VAL(3, 0xF)
+#define	SDRAM_BESR_M0ID_DECODE(n)	PPC_REG_DECODE(3, n)
+#define SDRAM_BESR_M0ID_ICU		PPC_REG_VAL(3, SDRAM_PLB_M0ID_ICU)
+#define SDRAM_BESR_M0ID_PCIE0		PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE0)
+#define SDRAM_BESR_M0ID_PCIE1		PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE1)
+#define SDRAM_BESR_M0ID_DMA		PPC_REG_VAL(3, SDRAM_PLB_M0ID_DMA)
+#define SDRAM_BESR_M0ID_DCU		PPC_REG_VAL(3, SDRAM_PLB_M0ID_DCU)
+#define SDRAM_BESR_M0ID_OPB		PPC_REG_VAL(3, SDRAM_PLB_M0ID_OPB)
+#define SDRAM_BESR_M0ID_MAL		PPC_REG_VAL(3, SDRAM_PLB_M0ID_MAL)
+#define SDRAM_BESR_M0ID_SEC		PPC_REG_VAL(3, SDRAM_PLB_M0ID_SEC)
+#define SDRAM_BESR_M0ID_AHB		PPC_REG_VAL(3, SDRAM_PLB_M0ID_AHB)
+#define SDRAM_BESR_M0ET_MASK		PPC_REG_VAL(6, 0x7)
+#define SDRAM_BESR_M0ET_NONE		PPC_REG_VAL(6, 0)
+#define SDRAM_BESR_M0ET_ECC		PPC_REG_VAL(6, 1)
+#define SDRAM_BESR_M0RW_MASK		PPC_REG_VAL(7, 1)
+#define SDRAM_BESR_M0RW_WRITE		PPC_REG_VAL(7, 0)
+#define SDRAM_BESR_M0RW_READ		PPC_REG_VAL(7, 1)
+
+/*
+ * Memory Controller PLB Write Master Interrupt Register
+ */
+#define SDRAM_WMIRQ_MASK		PPC_REG_VAL(8, 0x1FF)
+#define	SDRAM_WMIRQ_ENCODE(id)		PPC_REG_VAL((id % \
+						     SDRAM_PLB_M0ID_COUNT), 1)
+#define SDRAM_WMIRQ_ICU			PPC_REG_VAL(SDRAM_PLB_M0ID_ICU, 1)
+#define SDRAM_WMIRQ_PCIE0		PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE0, 1)
+#define SDRAM_WMIRQ_PCIE1		PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE1, 1)
+#define SDRAM_WMIRQ_DMA			PPC_REG_VAL(SDRAM_PLB_M0ID_DMA, 1)
+#define SDRAM_WMIRQ_DCU			PPC_REG_VAL(SDRAM_PLB_M0ID_DCU, 1)
+#define SDRAM_WMIRQ_OPB			PPC_REG_VAL(SDRAM_PLB_M0ID_OPB, 1)
+#define SDRAM_WMIRQ_MAL			PPC_REG_VAL(SDRAM_PLB_M0ID_MAL, 1)
+#define SDRAM_WMIRQ_SEC			PPC_REG_VAL(SDRAM_PLB_M0ID_SEC, 1)
+#define SDRAM_WMIRQ_AHB			PPC_REG_VAL(SDRAM_PLB_M0ID_AHB, 1)
+
+/*
+ * Memory Controller Options 1 Register
+ */
+#define SDRAM_MCOPT1_MCHK_MASK	    PPC_REG_VAL(3, 0x3)	 /* ECC mask	     */
+#define SDRAM_MCOPT1_MCHK_NON	    PPC_REG_VAL(3, 0x0)	 /* No ECC gen	     */
+#define SDRAM_MCOPT1_MCHK_GEN	    PPC_REG_VAL(3, 0x2)	 /* ECC gen	     */
+#define SDRAM_MCOPT1_MCHK_CHK	    PPC_REG_VAL(3, 0x1)	 /* ECC gen and chk  */
+#define SDRAM_MCOPT1_MCHK_CHK_REP   PPC_REG_VAL(3, 0x3)	 /* ECC gen/chk/rpt  */
+#define SDRAM_MCOPT1_MCHK_DECODE(n) ((((u32)(n)) >> 28) & 0x3)
+#define SDRAM_MCOPT1_RDEN_MASK	    PPC_REG_VAL(4, 0x1)	 /* Rgstrd DIMM mask */
+#define SDRAM_MCOPT1_RDEN	    PPC_REG_VAL(4, 0x1)	 /* Rgstrd DIMM enbl */
+#define SDRAM_MCOPT1_WDTH_MASK	    PPC_REG_VAL(7, 0x1)	 /* Width mask	     */
+#define SDRAM_MCOPT1_WDTH_32	    PPC_REG_VAL(7, 0x0)	 /* 32 bits	     */
+#define SDRAM_MCOPT1_WDTH_16	    PPC_REG_VAL(7, 0x1)	 /* 16 bits	     */
+#define SDRAM_MCOPT1_DDR_TYPE_MASK  PPC_REG_VAL(11, 0x1) /* DDR type mask    */
+#define SDRAM_MCOPT1_DDR1_TYPE	    PPC_REG_VAL(11, 0x0) /* DDR1 type	     */
+#define SDRAM_MCOPT1_DDR2_TYPE	    PPC_REG_VAL(11, 0x1) /* DDR2 type	     */
+
+/*
+ * Memory Bank 0 - n Configuration Register
+ */
+#define SDRAM_MBCF_BA_MASK		PPC_REG_VAL(12, 0x1FFF)
+#define SDRAM_MBCF_SZ_MASK		PPC_REG_VAL(19, 0xF)
+#define SDRAM_MBCF_SZ_DECODE(mbxcf)	PPC_REG_DECODE(19, mbxcf)
+#define SDRAM_MBCF_SZ_4MB		PPC_REG_VAL(19, 0x0)
+#define SDRAM_MBCF_SZ_8MB		PPC_REG_VAL(19, 0x1)
+#define SDRAM_MBCF_SZ_16MB		PPC_REG_VAL(19, 0x2)
+#define SDRAM_MBCF_SZ_32MB		PPC_REG_VAL(19, 0x3)
+#define SDRAM_MBCF_SZ_64MB		PPC_REG_VAL(19, 0x4)
+#define SDRAM_MBCF_SZ_128MB		PPC_REG_VAL(19, 0x5)
+#define SDRAM_MBCF_SZ_256MB		PPC_REG_VAL(19, 0x6)
+#define SDRAM_MBCF_SZ_512MB		PPC_REG_VAL(19, 0x7)
+#define SDRAM_MBCF_SZ_1GB		PPC_REG_VAL(19, 0x8)
+#define SDRAM_MBCF_SZ_2GB		PPC_REG_VAL(19, 0x9)
+#define SDRAM_MBCF_SZ_4GB		PPC_REG_VAL(19, 0xA)
+#define SDRAM_MBCF_SZ_8GB		PPC_REG_VAL(19, 0xB)
+#define SDRAM_MBCF_AM_MASK		PPC_REG_VAL(23, 0xF)
+#define SDRAM_MBCF_AM_MODE0		PPC_REG_VAL(23, 0x0)
+#define SDRAM_MBCF_AM_MODE1		PPC_REG_VAL(23, 0x1)
+#define SDRAM_MBCF_AM_MODE2		PPC_REG_VAL(23, 0x2)
+#define SDRAM_MBCF_AM_MODE3		PPC_REG_VAL(23, 0x3)
+#define SDRAM_MBCF_AM_MODE4		PPC_REG_VAL(23, 0x4)
+#define SDRAM_MBCF_AM_MODE5		PPC_REG_VAL(23, 0x5)
+#define SDRAM_MBCF_AM_MODE6		PPC_REG_VAL(23, 0x6)
+#define SDRAM_MBCF_AM_MODE7		PPC_REG_VAL(23, 0x7)
+#define SDRAM_MBCF_AM_MODE8		PPC_REG_VAL(23, 0x8)
+#define SDRAM_MBCF_AM_MODE9		PPC_REG_VAL(23, 0x9)
+#define SDRAM_MBCF_BE_MASK		PPC_REG_VAL(31, 0x1)
+#define SDRAM_MBCF_BE_DISABLE		PPC_REG_VAL(31, 0x0)
+#define SDRAM_MBCF_BE_ENABLE		PPC_REG_VAL(31, 0x1)
+
+/*
+ * ECC Error Status
+ */
+#define SDRAM_ECCES_MASK		PPC_REG_VAL(21, 0x3FFFFF)
+#define SDRAM_ECCES_BNCE_MASK		PPC_REG_VAL(15, 0xFFFF)
+#define SDRAM_ECCES_BNCE_ENCODE(lane)	PPC_REG_VAL(((lane) & 0xF), 1)
+#define SDRAM_ECCES_CKBER_MASK		PPC_REG_VAL(17, 0x3)
+#define SDRAM_ECCES_CKBER_NONE		PPC_REG_VAL(17, 0)
+#define SDRAM_ECCES_CKBER_16_ECC_0_3	PPC_REG_VAL(17, 2)
+#define SDRAM_ECCES_CKBER_32_ECC_0_3	PPC_REG_VAL(17, 1)
+#define SDRAM_ECCES_CKBER_32_ECC_4_8	PPC_REG_VAL(17, 2)
+#define SDRAM_ECCES_CKBER_32_ECC_0_8	PPC_REG_VAL(17, 3)
+#define SDRAM_ECCES_CE			PPC_REG_VAL(18, 1)
+#define SDRAM_ECCES_UE			PPC_REG_VAL(19, 1)
+#define SDRAM_ECCES_BKNER_MASK		PPC_REG_VAL(21, 0x3)
+#define SDRAM_ECCES_BK0ER		PPC_REG_VAL(20, 1)
+#define SDRAM_ECCES_BK1ER		PPC_REG_VAL(21, 1)
+
+#endif /* __PPC4XX_EDAC_H */
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
new file mode 100644
index 0000000..9900675
--- /dev/null
+++ b/drivers/edac/r82600_edac.c
@@ -0,0 +1,421 @@
+/*
+ * Radisys 82600 Embedded chipset Memory Controller kernel module
+ * (C) 2005 EADS Astrium
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne
+ * Harbaugh, Dan Hollis <goemon at anime dot net> and others.
+ *
+ * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ * Written with reference to 82600 High Integration Dual PCI System
+ * Controller Data Book:
+ * www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
+ * references to this document given in []
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define R82600_REVISION	" Ver: 2.0.2 " __DATE__
+#define EDAC_MOD_STR	"r82600_edac"
+
+#define r82600_printk(level, fmt, arg...) \
+	edac_printk(level, "r82600", fmt, ##arg)
+
+#define r82600_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg)
+
+/* Radisys say "The 82600 integrates a main memory SDRAM controller that
+ * supports up to four banks of memory. The four banks can support a mix of
+ * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
+ * each of which can be any size from 16MB to 512MB. Both registered (control
+ * signals buffered) and unbuffered DIMM types are supported. Mixing of
+ * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs
+ * is not allowed. The 82600 SDRAM interface operates at the same frequency as
+ * the CPU bus, 66MHz, 100MHz or 133MHz."
+ */
+
+#define R82600_NR_CSROWS 4
+#define R82600_NR_CHANS  1
+#define R82600_NR_DIMMS  4
+
+#define R82600_BRIDGE_ID  0x8200
+
+/* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */
+#define R82600_DRAMC	0x57	/* Various SDRAM related control bits
+				 * all bits are R/W
+				 *
+				 * 7    SDRAM ISA Hole Enable
+				 * 6    Flash Page Mode Enable
+				 * 5    ECC Enable: 1=ECC 0=noECC
+				 * 4    DRAM DIMM Type: 1=
+				 * 3    BIOS Alias Disable
+				 * 2    SDRAM BIOS Flash Write Enable
+				 * 1:0  SDRAM Refresh Rate: 00=Disabled
+				 *          01=7.8usec (256Mbit SDRAMs)
+				 *          10=15.6us 11=125usec
+				 */
+
+#define R82600_SDRAMC	0x76	/* "SDRAM Control Register"
+				 * More SDRAM related control bits
+				 * all bits are R/W
+				 *
+				 * 15:8 Reserved.
+				 *
+				 * 7:5  Special SDRAM Mode Select
+				 *
+				 * 4    Force ECC
+				 *
+				 *        1=Drive ECC bits to 0 during
+				 *          write cycles (i.e. ECC test mode)
+				 *
+				 *        0=Normal ECC functioning
+				 *
+				 * 3    Enhanced Paging Enable
+				 *
+				 * 2    CAS# Latency 0=3clks 1=2clks
+				 *
+				 * 1    RAS# to CAS# Delay 0=3 1=2
+				 *
+				 * 0    RAS# Precharge     0=3 1=2
+				 */
+
+#define R82600_EAP	0x80	/* ECC Error Address Pointer Register
+				 *
+				 * 31    Disable Hardware Scrubbing (RW)
+				 *        0=Scrub on corrected read
+				 *        1=Don't scrub on corrected read
+				 *
+				 * 30:12 Error Address Pointer (RO)
+				 *        Upper 19 bits of error address
+				 *
+				 * 11:4  Syndrome Bits (RO)
+				 *
+				 * 3     BSERR# on multibit error (RW)
+				 *        1=enable 0=disable
+				 *
+				 * 2     NMI on Single Bit Error (RW)
+				 *        1=NMI triggered by SBE, n.b. other
+				 *          prerequisites apply
+				 *        0=NMI not triggered
+				 *
+				 * 1     MBE (R/WC)
+				 *        read 1=MBE at EAP (see above)
+				 *        read 0=no MBE, or SBE occurred first
+				 *        write 1=Clear MBE status (must also
+				 *          clear SBE)
+				 *        write 0=NOP
+				 *
+				 * 0     SBE (R/WC)
+				 *        read 1=SBE at EAP (see above)
+				 *        read 0=no SBE, or MBE occurred first
+				 *        write 1=Clear SBE status (must also
+				 *          clear MBE)
+				 *        write 0=NOP
+				 */
+
+#define R82600_DRBA	0x60	/* + 0x60..0x63 SDRAM Row Boundary Address
+				 *  Registers
+				 *
+				 * 7:0  Address lines 30:24 - upper limit of
+				 * each row [p57]
+				 */
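+
+/*
+ * Illustrative decode: a DRBA value of 0x04 places the row's upper
+ * boundary at 0x04 << 24 = 0x04000000 (64MB); two consecutive
+ * registers holding the same boundary denote an empty row (see
+ * r82600_init_csrows()).
+ */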
+
+struct r82600_error_info {
+	u32 eapr;
+};
+
+static unsigned int disable_hardware_scrub;
+
+static struct edac_pci_ctl_info *r82600_pci;
+
+static void r82600_get_error_info(struct mem_ctl_info *mci,
+				struct r82600_error_info *info)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(mci->dev);
+	pci_read_config_dword(pdev, R82600_EAP, &info->eapr);
+
+	if (info->eapr & BIT(0))
+		/* Clear error to allow next error to be reported [p.62] */
+		pci_write_bits32(pdev, R82600_EAP,
+				 ((u32) BIT(0) | (u32) BIT(1)),
+				 ((u32) BIT(0) | (u32) BIT(1)));
+
+	if (info->eapr & BIT(1))
+		/* Clear error to allow next error to be reported [p.62] */
+		pci_write_bits32(pdev, R82600_EAP,
+				 ((u32) BIT(0) | (u32) BIT(1)),
+				 ((u32) BIT(0) | (u32) BIT(1)));
+}
+
+static int r82600_process_error_info(struct mem_ctl_info *mci,
+				struct r82600_error_info *info,
+				int handle_errors)
+{
+	int error_found;
+	u32 eapaddr, page;
+	u32 syndrome;
+
+	error_found = 0;
+
+	/* bits 30:12 store the upper 19 bits of the 32 bit error address */
+	eapaddr = ((info->eapr >> 12) & 0x7FFFF) << 13;
+	/* Syndrome in bits 11:4 [p.62]       */
+	syndrome = (info->eapr >> 4) & 0xFF;
+
+	/* the R82600 reports at less than page *
+	 * granularity (upper 19 bits only)     */
+	page = eapaddr >> PAGE_SHIFT;
+
+	if (info->eapr & BIT(0)) {	/* CE? */
+		error_found = 1;
+
+		if (handle_errors)
+			edac_mc_handle_ce(mci, page, 0,	/* not avail */
+					syndrome,
+					edac_mc_find_csrow_by_page(mci, page),
+					0, mci->ctl_name);
+	}
+
+	if (info->eapr & BIT(1)) {	/* UE? */
+		error_found = 1;
+
+		if (handle_errors)
+			/* 82600 doesn't give enough info */
+			edac_mc_handle_ue(mci, page, 0,
+					edac_mc_find_csrow_by_page(mci, page),
+					mci->ctl_name);
+	}
+
+	return error_found;
+}
+
+static void r82600_check(struct mem_ctl_info *mci)
+{
+	struct r82600_error_info info;
+
+	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+	r82600_get_error_info(mci, &info);
+	r82600_process_error_info(mci, &info, 1);
+}
+
+static inline int ecc_enabled(u8 dramcr)
+{
+	return dramcr & BIT(5);
+}
+
+static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
+			u8 dramcr)
+{
+	struct csrow_info *csrow;
+	int index;
+	u8 drbar;		/* SDRAM Row Boundary Address Register */
+	u32 row_high_limit, row_high_limit_last;
+	u32 reg_sdram, ecc_on, row_base;
+
+	ecc_on = ecc_enabled(dramcr);
+	reg_sdram = dramcr & BIT(4);
+	row_high_limit_last = 0;
+
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+
+		/* find the DRAM Chip Select Base address and mask */
+		pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
+
+		debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar);
+
+		row_high_limit = ((u32) drbar << 24);
+/*		row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
+
+		debugf1("%s() Row=%d, Boundry Address=%#0x, Last = %#0x\n",
+			__func__, index, row_high_limit, row_high_limit_last);
+
+		/* Empty row [p.57] */
+		if (row_high_limit == row_high_limit_last)
+			continue;
+
+		row_base = row_high_limit_last;
+
+		csrow->first_page = row_base >> PAGE_SHIFT;
+		csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
+		csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+		/* Error address is top 19 bits - so granularity is      *
+		 * 14 bits                                               */
+		csrow->grain = 1 << 14;
+		csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+		/* FIXME - check that this is unknowable with this chipset */
+		csrow->dtype = DEV_UNKNOWN;
+
+		/* Mode is global on 82600 */
+		csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+		row_high_limit_last = row_high_limit;
+	}
+}
+
+static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	struct mem_ctl_info *mci;
+	u8 dramcr;
+	u32 eapr;
+	u32 scrub_disabled;
+	u32 sdram_refresh_rate;
+	struct r82600_error_info discard;
+
+	debugf0("%s()\n", __func__);
+	pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
+	pci_read_config_dword(pdev, R82600_EAP, &eapr);
+	scrub_disabled = eapr & BIT(31);
+	sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
+	debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
+		sdram_refresh_rate);
+	debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
+	mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);
+
+	if (mci == NULL)
+		return -ENOMEM;
+
+	debugf0("%s(): mci = %p\n", __func__, mci);
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+	/* FIXME: try to work out whether the chip leads have been used
+	 * for COM2 instead on this board [MA6?]:
+	 */
+
+	/* On the R82600, the pins for memory bits 72:65 - i.e. the   *
+	 * EC bits are shared with the pins for COM2 (!), so if COM2  *
+	 * is enabled, we assume COM2 is wired up, and thus no EDAC   *
+	 * is possible.                                               */
+	mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+
+	if (ecc_enabled(dramcr)) {
+		if (scrub_disabled)
+			debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
+				"%#0x\n", __func__, mci, eapr);
+	} else
+		mci->edac_cap = EDAC_FLAG_NONE;
+
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = R82600_REVISION;
+	mci->ctl_name = "R82600";
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = r82600_check;
+	mci->ctl_page_to_phys = NULL;
+	r82600_init_csrows(mci, pdev, dramcr);
+	r82600_get_error_info(mci, &discard);	/* clear counters */
+
+	/* Here we assume that we will never see multiple instances of this
+	 * type of memory controller.  The ID is therefore hardcoded to 0.
+	 */
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail;
+	}
+
+	/* get this far and it's successful */
+
+	if (disable_hardware_scrub) {
+		debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n",
+			__func__);
+		pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
+	}
+
+	/* allocating generic PCI control info */
+	r82600_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!r82600_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	debugf3("%s(): success\n", __func__);
+	return 0;
+
+fail:
+	edac_mc_free(mci);
+	return -ENODEV;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit r82600_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	debugf0("%s()\n", __func__);
+
+	/* don't need to call pci_device_enable() */
+	return r82600_probe1(pdev, ent->driver_data);
+}
+
+static void __devexit r82600_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0("%s()\n", __func__);
+
+	if (r82600_pci)
+		edac_pci_release_generic_ctl(r82600_pci);
+
+	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+		return;
+
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
+	},
+	{
+		0,
+	}			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
+
+static struct pci_driver r82600_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = r82600_init_one,
+	.remove = __devexit_p(r82600_remove_one),
+	.id_table = r82600_pci_tbl,
+};
+
+static int __init r82600_init(void)
+{
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	return pci_register_driver(&r82600_driver);
+}
+
+static void __exit r82600_exit(void)
+{
+	pci_unregister_driver(&r82600_driver);
+}
+
+module_init(r82600_init);
+module_exit(r82600_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
+		"on behalf of EADS Astrium");
+MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
+
+module_param(disable_hardware_scrub, bool, 0644);
+MODULE_PARM_DESC(disable_hardware_scrub,
+		 "If set, disable the chipset's automatic scrub for CEs");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
new file mode 100644
index 0000000..d4ec605
--- /dev/null
+++ b/drivers/edac/x38_edac.c
@@ -0,0 +1,524 @@
+/*
+ * Intel X38 Memory Controller kernel module
+ * Copyright (C) 2008 Cluster Computing, Inc.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * This file is based on i3200_edac.c
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define X38_REVISION		"1.1"
+
+#define EDAC_MOD_STR		"x38_edac"
+
+#define PCI_DEVICE_ID_INTEL_X38_HB	0x29e0
+
+#define X38_RANKS		8
+#define X38_RANKS_PER_CHANNEL	4
+#define X38_CHANNELS		2
+
+/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
+
+#define X38_MCHBAR_LOW	0x48	/* MCH Memory Mapped Register BAR */
+#define X38_MCHBAR_HIGH	0x4c
+#define X38_MCHBAR_MASK	0xfffffc000ULL	/* bits 35:14 */
+#define X38_MMR_WINDOW_SIZE	16384
+
+#define X38_TOM	0xa0	/* Top of Memory (16b)
+				 *
+				 * 15:10 reserved
+				 *  9:0  total populated physical memory
+				 */
+#define X38_TOM_MASK	0x3ff	/* bits 9:0 */
+#define X38_TOM_SHIFT 26	/* 64MiB grain */
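+
+/*
+ * Illustrative decode: a TOM field value of 0x040 encodes
+ * 0x040 << X38_TOM_SHIFT = 4GiB of populated memory (64 * 64MiB).
+ */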
+
+#define X38_ERRSTS	0xc8	/* Error Status Register (16b)
+				 *
+				 * 15    reserved
+				 * 14    Isochronous TBWRR Run Behind FIFO Full
+				 *       (ITCV)
+				 * 13    Isochronous TBWRR Run Behind FIFO Put
+				 *       (ITSTV)
+				 * 12    reserved
+				 * 11    MCH Thermal Sensor Event
+				 *       for SMI/SCI/SERR (GTSE)
+				 * 10    reserved
+				 *  9    LOCK to non-DRAM Memory Flag (LCKF)
+				 *  8    reserved
+				 *  7    DRAM Throttle Flag (DTF)
+				 *  6:2  reserved
+				 *  1    Multi-bit DRAM ECC Error Flag (DMERR)
+				 *  0    Single-bit DRAM ECC Error Flag (DSERR)
+				 */
+#define X38_ERRSTS_UE		0x0002
+#define X38_ERRSTS_CE		0x0001
+#define X38_ERRSTS_BITS	(X38_ERRSTS_UE | X38_ERRSTS_CE)
+
+
+/* Intel  MMIO register space - device 0 function 0 - MMR space */
+
+#define X38_C0DRB	0x200	/* Channel 0 DRAM Rank Boundary (16b x 4)
+				 *
+				 * 15:10 reserved
+				 *  9:0  Channel 0 DRAM Rank Boundary Address
+				 */
+#define X38_C1DRB	0x600	/* Channel 1 DRAM Rank Boundary (16b x 4) */
+#define X38_DRB_MASK	0x3ff	/* bits 9:0 */
+#define X38_DRB_SHIFT 26	/* 64MiB grain */
+
+#define X38_C0ECCERRLOG 0x280	/* Channel 0 ECC Error Log (64b)
+				 *
+				 * 63:48 Error Column Address (ERRCOL)
+				 * 47:32 Error Row Address (ERRROW)
+				 * 31:29 Error Bank Address (ERRBANK)
+				 * 28:27 Error Rank Address (ERRRANK)
+				 * 26:24 reserved
+				 * 23:16 Error Syndrome (ERRSYND)
+				 * 15: 2 reserved
+				 *    1  Multiple Bit Error Status (MERRSTS)
+				 *    0  Correctable Error Status (CERRSTS)
+				 */
+#define X38_C1ECCERRLOG 0x680	/* Channel 1 ECC Error Log (64b) */
+#define X38_ECCERRLOG_CE	0x1
+#define X38_ECCERRLOG_UE	0x2
+#define X38_ECCERRLOG_RANK_BITS	0x18000000
+#define X38_ECCERRLOG_SYNDROME_BITS	0xff0000
+
+#define X38_CAPID0 0xe0	/* see P.94 of spec for details */
+
+static int x38_channel_num;
+
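+/*
+ * Read byte 8 of the CAPID0 capability register and check the Dual
+ * Channel Disable (DCD) bit to decide whether the controller runs in
+ * single- or dual-channel mode.  The result is cached in
+ * x38_channel_num for the rest of the driver.
+ */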
+static int how_many_channel(struct pci_dev *pdev)
+{
+	unsigned char capid0_8b; /* 8th byte of CAPID0 */
+
+	pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
+	if (capid0_8b & 0x20) {	/* check DCD: Dual Channel Disable */
+		debugf0("In single channel mode.\n");
+		x38_channel_num = 1;
+	} else {
+		debugf0("In dual channel mode.\n");
+		x38_channel_num = 2;
+	}
+
+	return x38_channel_num;
+}
+
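+/* Extract the ECC syndrome (ERRSYND, bits 23:16) from an ECCERRLOG value. */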
+static unsigned long eccerrlog_syndrome(u64 log)
+{
+	return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
+}
+
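+/*
+ * Extract the failing rank (ERRRANK, bits 28:27) from an ECCERRLOG
+ * value and offset it by the channel so it indexes the flat csrow
+ * array (channel 0 holds csrows 0-3, channel 1 holds csrows 4-7).
+ */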
+static int eccerrlog_row(int channel, u64 log)
+{
+	return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
+		(channel * X38_RANKS_PER_CHANNEL);
+}
+
+enum x38_chips {
+	X38 = 0,
+};
+
+struct x38_dev_info {
+	const char *ctl_name;
+};
+
+struct x38_error_info {
+	u16 errsts;
+	u16 errsts2;
+	u64 eccerrlog[X38_CHANNELS];
+};
+
+static const struct x38_dev_info x38_devs[] = {
+	[X38] = {
+		.ctl_name = "x38",
+	},
+};
+
+static struct pci_dev *mci_pdev;
+static int x38_registered = 1;
+
+
+static void x38_clear_error_info(struct mem_ctl_info *mci)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(mci->dev);
+
+	/*
+	 * Clear any error bits.
+	 * (Yes, we really clear bits by writing 1 to them.)
+	 */
+	pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
+			 X38_ERRSTS_BITS);
+}
+
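+/*
+ * Read a 64-bit MMR as two 32-bit MMIO reads (low dword first).  This
+ * is not atomic, which is part of why ERRSTS has to be sampled before
+ * and after reading the logs.
+ */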
+static u64 x38_readq(const void __iomem *addr)
+{
+	return readl(addr) | (((u64)readl(addr + 4)) << 32);
+}
+
+static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
+				 struct x38_error_info *info)
+{
+	struct pci_dev *pdev;
+	void __iomem *window = mci->pvt_info;
+
+	pdev = to_pci_dev(mci->dev);
+
+	/*
+	 * This is a mess because there is no atomic way to read all the
+	 * registers at once, and a CE can be overwritten by a UE between
+	 * the individual reads.
+	 */
+	pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
+	if (!(info->errsts & X38_ERRSTS_BITS))
+		return;
+
+	info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+	if (x38_channel_num == 2)
+		info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);
+
+	pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);
+
+	/*
+	 * If the error bits are the same for both reads, the first
+	 * set of reads is valid.  If they changed, a CE arrived whose
+	 * details were lost, and the second set of reads is valid and
+	 * should hold the UE info.
+	 */
+	if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
+		info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+		if (x38_channel_num == 2)
+			info->eccerrlog[1] =
+				x38_readq(window + X38_C1ECCERRLOG);
+	}
+
+	x38_clear_error_info(mci);
+}
+
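+/*
+ * Walk the per-channel ECC error logs gathered above and report each
+ * error to the EDAC core; within a log, a UE takes precedence over a CE.
+ */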
+static void x38_process_error_info(struct mem_ctl_info *mci,
+				struct x38_error_info *info)
+{
+	int channel;
+	u64 log;
+
+	if (!(info->errsts & X38_ERRSTS_BITS))
+		return;
+
+	if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
+		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		info->errsts = info->errsts2;
+	}
+
+	for (channel = 0; channel < x38_channel_num; channel++) {
+		log = info->eccerrlog[channel];
+		if (log & X38_ECCERRLOG_UE) {
+			edac_mc_handle_ue(mci, 0, 0,
+				eccerrlog_row(channel, log), "x38 UE");
+		} else if (log & X38_ECCERRLOG_CE) {
+			edac_mc_handle_ce(mci, 0, 0,
+				eccerrlog_syndrome(log),
+				eccerrlog_row(channel, log), 0, "x38 CE");
+		}
+	}
+}
+
+static void x38_check(struct mem_ctl_info *mci)
+{
+	struct x38_error_info info;
+
+	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+	x38_get_and_clear_error_info(mci, &info);
+	x38_process_error_info(mci, &info);
+}
+
+
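+/*
+ * Map the 16KiB MCH memory-mapped register window.  Bit 0 of the low
+ * BAR dword is the MCHBAR enable bit, so the value is written back
+ * with that bit set; the (resource_size_t) comparison catches a BAR
+ * that lies beyond what a 32-bit resource_size_t can address.
+ */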
+static void __iomem *x38_map_mchbar(struct pci_dev *pdev)
+{
+	union {
+		u64 mchbar;
+		struct {
+			u32 mchbar_low;
+			u32 mchbar_high;
+		};
+	} u;
+	void __iomem *window;
+
+	pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
+	pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
+	pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
+	u.mchbar &= X38_MCHBAR_MASK;
+
+	if (u.mchbar != (resource_size_t)u.mchbar) {
+		printk(KERN_ERR
+			"x38: mmio space beyond accessible range (0x%llx)\n",
+			(unsigned long long)u.mchbar);
+		return NULL;
+	}
+
+	window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
+	if (!window)
+		printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
+			(unsigned long long)u.mchbar);
+
+	return window;
+}
+
+
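+/*
+ * Fetch the four 16-bit DRAM Rank Boundary registers of each channel
+ * from the MMR window; consecutive DRBs sit 2 bytes apart.
+ */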
+static void x38_get_drbs(void __iomem *window,
+			u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
+{
+	int i;
+
+	for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
+		drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
+		drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
+	}
+}
+
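+/*
+ * Detect stacked mode: when channel 1 is mapped directly above channel
+ * 0, the last DRB of channel 1 spans all populated memory and matches
+ * the Top of Memory (TOM) register.
+ */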
+static bool x38_is_stacked(struct pci_dev *pdev,
+			u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
+{
+	u16 tom;
+
+	pci_read_config_word(pdev, X38_TOM, &tom);
+	tom &= X38_TOM_MASK;
+
+	return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
+}
+
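+/*
+ * Convert a rank's DRB entry into a page count.  DRBs are cumulative,
+ * so a rank's size is its boundary minus the previous rank's; in
+ * stacked mode the channel 0 total is additionally subtracted from
+ * channel 1 ranks sitting at the top boundary.  As a hypothetical
+ * example (values not taken from any datasheet): drbs[0] = {4, 8, 8, 8}
+ * gives rank 1 a size of (8 - 4) * 64MiB = 256MiB, i.e.
+ * 4 << (26 - 12) = 65536 pages with 4KiB pages.
+ */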
+static unsigned long drb_to_nr_pages(
+			u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
+			bool stacked, int channel, int rank)
+{
+	int n;
+
+	n = drbs[channel][rank];
+	if (rank > 0)
+		n -= drbs[channel][rank - 1];
+	if (stacked && (channel == 1) && drbs[channel][rank] ==
+				drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
+		n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
+	}
+
+	n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
+	return n;
+}
+
+static int x38_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	int rc;
+	int i;
+	struct mem_ctl_info *mci = NULL;
+	unsigned long last_page;
+	u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
+	bool stacked;
+	void __iomem *window;
+
+	debugf0("MC: %s()\n", __func__);
+
+	window = x38_map_mchbar(pdev);
+	if (!window)
+		return -ENODEV;
+
+	x38_get_drbs(window, drbs);
+
+	how_many_channel(pdev);
+
+	/* FIXME: unconventional pvt_info usage */
+	mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
+	if (!mci)
+		return -ENOMEM;
+
+	debugf3("MC: %s(): init mci\n", __func__);
+
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR2;
+
+	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_SECDED;
+
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = X38_REVISION;
+	mci->ctl_name = x38_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = x38_check;
+	mci->ctl_page_to_phys = NULL;
+	mci->pvt_info = window;
+
+	stacked = x38_is_stacked(pdev, drbs);
+
+	/*
+	 * The DRAM rank boundary (DRB) reg values are boundary addresses
+	 * for each DRAM rank with a granularity of 64MiB.  DRB regs are
+	 * cumulative; the last one will contain the total memory
+	 * contained in all ranks.
+	 */
+	last_page = -1UL;
+	for (i = 0; i < mci->nr_csrows; i++) {
+		unsigned long nr_pages;
+		struct csrow_info *csrow = &mci->csrows[i];
+
+		nr_pages = drb_to_nr_pages(drbs, stacked,
+			i / X38_RANKS_PER_CHANNEL,
+			i % X38_RANKS_PER_CHANNEL);
+
+		if (nr_pages == 0) {
+			csrow->mtype = MEM_EMPTY;
+			continue;
+		}
+
+		csrow->first_page = last_page + 1;
+		last_page += nr_pages;
+		csrow->last_page = last_page;
+		csrow->nr_pages = nr_pages;
+
+		csrow->grain = nr_pages << PAGE_SHIFT;
+		csrow->mtype = MEM_DDR2;
+		csrow->dtype = DEV_UNKNOWN;
+		csrow->edac_mode = EDAC_UNKNOWN;
+	}
+
+	x38_clear_error_info(mci);
+
+	rc = -ENODEV;
+	if (edac_mc_add_mc(mci)) {
+		debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail;
+	}
+
+	/* getting this far means everything succeeded */
+	debugf3("MC: %s(): success\n", __func__);
+	return 0;
+
+fail:
+	iounmap(window);
+	if (mci)
+		edac_mc_free(mci);
+
+	return rc;
+}
+
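+/*
+ * Standard PCI probe entry.  A reference to the probed device is also
+ * cached in mci_pdev so x38_init()/x38_exit() can tell whether the PCI
+ * core bound the driver or whether the device must be driven by hand.
+ */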
+static int __devinit x38_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	int rc;
+
+	debugf0("MC: %s()\n", __func__);
+
+	if (pci_enable_device(pdev) < 0)
+		return -EIO;
+
+	rc = x38_probe1(pdev, ent->driver_data);
+	if (!mci_pdev)
+		mci_pdev = pci_dev_get(pdev);
+
+	return rc;
+}
+
+static void __devexit x38_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0("%s()\n", __func__);
+
+	mci = edac_mc_del_mc(&pdev->dev);
+	if (!mci)
+		return;
+
+	iounmap(mci->pvt_info);
+
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
+	{
+		PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		X38,
+	},
+	{ 0, }			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, x38_pci_tbl);
+
+static struct pci_driver x38_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = x38_init_one,
+	.remove = __devexit_p(x38_remove_one),
+	.id_table = x38_pci_tbl,
+};
+
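+/*
+ * If pci_register_driver() did not bind us to the device (mci_pdev is
+ * still NULL), look the host bridge up directly and probe it by hand.
+ * x38_registered records which path was taken so x38_exit() knows
+ * whether it must undo the manual probe itself.
+ */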
+static int __init x38_init(void)
+{
+	int pci_rc;
+
+	debugf3("MC: %s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
+	pci_rc = pci_register_driver(&x38_driver);
+	if (pci_rc < 0)
+		goto fail0;
+
+	if (!mci_pdev) {
+		x38_registered = 0;
+		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+					PCI_DEVICE_ID_INTEL_X38_HB, NULL);
+		if (!mci_pdev) {
+			debugf0("x38 pci_get_device fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+
+		pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
+		if (pci_rc < 0) {
+			debugf0("x38 init fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+	}
+
+	return 0;
+
+fail1:
+	pci_unregister_driver(&x38_driver);
+
+fail0:
+	if (mci_pdev)
+		pci_dev_put(mci_pdev);
+
+	return pci_rc;
+}
+
+static void __exit x38_exit(void)
+{
+	debugf3("MC: %s()\n", __func__);
+
+	pci_unregister_driver(&x38_driver);
+	if (!x38_registered) {
+		x38_remove_one(mci_pdev);
+		pci_dev_put(mci_pdev);
+	}
+}
+
+module_init(x38_init);
+module_exit(x38_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
+MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");