Merge branch 'mindspeed_drops' into HEAD

Conflicts:
	arch/arm/mach-comcerto/comcerto-2000.c
	drivers/mtd/nand/comcerto_nand.c

Enable CONFIG_SUSPEND because the new Mindspeed kernel would not compile
without it.
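
For reference, dropping the "# CONFIG_SUSPEND is not set" line from
gfrg200_defconfig is enough to enable it, since SUSPEND defaults to y
whenever ARCH_SUSPEND_POSSIBLE is set. A minimal sketch of the resulting
fragment (illustrative only; regenerate the defconfig to get the exact
lines):

    CONFIG_SUSPEND=y
    CONFIG_PM_SLEEP=y
    CONFIG_ARCH_SUSPEND_POSSIBLE=y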

Change-Id: I336163cf5b3de56e4d38a135dd05d0e75c9ad2b2
diff --git a/arch/arm/configs/c2kmfcnevm_defconfig b/arch/arm/configs/c2kmfcnevm_defconfig
new file mode 100644
index 0000000..2e12bbf
--- /dev/null
+++ b/arch/arm/configs/c2kmfcnevm_defconfig
@@ -0,0 +1,1047 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/arm 3.2.2 Kernel Configuration
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_ARM_PATCH_PHYS_VIRT=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_HAVE_IRQ_WORK=y
+CONFIG_IRQ_WORK=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_FHANDLE is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_HAVE_SPARSE_IRQ=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_SPARSE_IRQ is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_FREEZER is not set
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+# CONFIG_RESOURCE_COUNTERS is not set
+# CONFIG_CGROUP_PERF is not set
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_BLK_CGROUP is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+# CONFIG_SCHED_AUTOGROUP is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
+# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EXPERT is not set
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+# CONFIG_EMBEDDED is not set
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_PROFILING is not set
+CONFIG_TRACEPOINTS=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_INTEGRATOR is not set
+CONFIG_ARCH_COMCERTO=y
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_HIGHBANK is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_PRIMA2 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_MXS is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_PICOXCELL is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_EXYNOS is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_TCC_926 is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_PLAT_SPEAR is not set
+# CONFIG_ARCH_VT8500 is not set
+# CONFIG_ARCH_ZYNQ is not set
+# CONFIG_ARCH_SUPPORTS_BIG_ENDIAN is not set
+
+#
+# System MMU
+#
+
+#
+# Comcerto Implementation Options
+#
+CONFIG_ARCH_M86XXX=y
+# CONFIG_C2K_EVM is not set
+CONFIG_C2K_MFCN_EVM=y
+# CONFIG_C2K_ASIC is not set
+# CONFIG_RTSM_C2K is not set
+CONFIG_MTD_COMCERTO_NOR=y
+CONFIG_COMCERTO_TDM_CLOCK=y
+# CONFIG_PCI is not set
+CONFIG_COMCERTO_NUM_PCIES=2
+CONFIG_COMCERTO_FP=y
+# CONFIG_COMCERTO_UART0_SUPPORT is not set
+CONFIG_COMCERTO_UART1_SUPPORT=y
+CONFIG_COMCERTO_USB0_SUPPORT=y
+# CONFIG_COMCERTO_USB1_SUPPORT is not set
+CONFIG_COMCERTO_IPSEC_SUPPORT=y
+CONFIG_COMCERTO_SPI_SUPPORT=y
+CONFIG_COMCERTO_FAST_SPI_SUPPORT=y
+CONFIG_COMCERTO_I2C_SUPPORT=y
+CONFIG_COMCERTO_DW_DMA_SUPPORT=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_ARM_THUMBEE is not set
+# CONFIG_SWP_EMULATE is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_OUTER_CACHE=y
+CONFIG_OUTER_CACHE_SYNC=y
+CONFIG_CACHE_L2X0=y
+CONFIG_CACHE_PL310=y
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+CONFIG_ARM_ERRATA_742230=y
+CONFIG_ARM_ERRATA_742231=y
+CONFIG_PL310_ERRATA_588369=y
+CONFIG_ARM_ERRATA_720789=y
+CONFIG_PL310_ERRATA_727915=y
+CONFIG_ARM_ERRATA_743622=y
+CONFIG_ARM_ERRATA_751472=y
+CONFIG_PL310_ERRATA_753970=y
+CONFIG_ARM_ERRATA_754322=y
+# CONFIG_ARM_ERRATA_754327 is not set
+CONFIG_ARM_ERRATA_764369=y
+CONFIG_PL310_ERRATA_769419=y
+CONFIG_ARM_GIC=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SMP=y
+# CONFIG_SMP_ON_UP is not set
+CONFIG_ARM_CPU_TOPOLOGY=y
+# CONFIG_SCHED_MC is not set
+# CONFIG_SCHED_SMT is not set
+CONFIG_HAVE_ARM_SCU=y
+CONFIG_HAVE_ARM_TWD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_NR_CPUS=2
+# CONFIG_HOTPLUG_CPU is not set
+CONFIG_LOCAL_TIMERS=y
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_HAVE_ARCH_PFN_VALID=y
+# CONFIG_HIGHMEM is not set
+CONFIG_HW_PERF_EVENTS=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_COMPACTION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_CLEANCACHE is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+
+#
+# Boot options
+#
+# CONFIG_USE_OF is not set
+CONFIG_ZBOOT_ROM_TEXT=0x00608000
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_ZBOOT_ROM=y
+CONFIG_CMDLINE=""
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+
+#
+# CPU Power Management
+#
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_SUSPEND is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ARM_CPU_SUSPEND is not set
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=y
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_DOCG3 is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_MISC_DEVICES is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVKMEM=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_RAMOOPS is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+# CONFIG_I2C_COMPAT is not set
+# CONFIG_I2C_CHARDEV is not set
+# CONFIG_I2C_MUX is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_SMBUS=y
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=m
+# CONFIG_I2C_ALGOPCF is not set
+CONFIG_I2C_ALGOPCA=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_COMCERTO=y
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PCA_PLATFORM=m
+# CONFIG_I2C_PXA_PCI is not set
+CONFIG_I2C_SIMTEC=m
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+CONFIG_I2C_PARPORT_LIGHT=m
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_SPI is not set
+CONFIG_SPI_MSPD=y
+CONFIG_COMCERTO_SPI=y
+
+#
+# Miscellaneous I2C Chip support
+#
+CONFIG_EEPROM_AT=y
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+
+#
+# Enable Device Drivers -> PPS to see the PTP clock options.
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB=y
+# CONFIG_SSB_DEBUG is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+CONFIG_DRM=m
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+# CONFIG_FB is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=m
+
+#
+# Display hardware drivers
+#
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+# CONFIG_STAGING is not set
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_IOMMU_SUPPORT=y
+# CONFIG_VIRT_DRIVERS is not set
+# CONFIG_PM_DEVFREQ is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_FSNOTIFY is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_FANOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+CONFIG_AUTOFS4_FS=y
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=1
+# CONFIG_JFFS2_FS_WRITEBUFFER is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+# CONFIG_LOCKUP_DETECTOR is not set
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_EVENT_POWER_TRACING_DEPRECATED=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+# CONFIG_CRYPTO is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+# CONFIG_CRC8 is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+# CONFIG_XZ_DEC is not set
+# CONFIG_XZ_DEC_BCJ is not set
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+# CONFIG_AVERAGE is not set
+# CONFIG_CORDIC is not set
diff --git a/arch/arm/configs/gfrg200_defconfig b/arch/arm/configs/gfrg200_defconfig
index e8008b1..3ac89f6 100644
--- a/arch/arm/configs/gfrg200_defconfig
+++ b/arch/arm/configs/gfrg200_defconfig
@@ -52,7 +52,6 @@
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-# CONFIG_SUSPEND is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 020e99c..0a589b7 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -5,6 +5,13 @@
 #include <asm/glue-cache.h>
 #include <asm/glue-proc.h>
 #include <asm/system.h>
+
+#ifdef CONFIG_ARCH_M86XXX
+/*
+ * IRAM location of the saved banked-register context to be restored
+ */
+#define SCRATCHPAD_CPU_CONTEXT_LOC_DBG	 (0xf0802570)
+#endif
 	.text
 
 /*
@@ -64,10 +71,66 @@
 ENDPROC(cpu_resume_mmu)
 cpu_resume_after_mmu:
 	bl	cpu_init		@ restore the und/abt/irq banked regs
+
+#ifdef CONFIG_ARCH_M86XXX
+	/*
+	 * This code restores the banked registers.
+	 * The restore has to be kept here because it must happen
+	 * after MMU init.
+	 */
+
+	/* Get the IRAM location holding the saved banked-register context */
+        ldr 	r7, scratchpad_cpu_context_loc_dbg
+
+        /* Save the current cpsr, then restore the banked registers of each mode */
+        mrs     r0, cpsr
+
+        /* Restore the Undef mode registers */
+        bic     r1, r0, #0x1f
+        orr     r1, r1, #0x1b
+        msr     cpsr_c, r1
+	ARM ( ldmia	r7!, {r13-r14} )
+	ldmia	r7!, {r6}
+	msr	spsr, r6
+
+        /* Restore the Abort mode registers */
+        bic     r1, r0, #0x1f
+        orr     r1, r1, #0x17
+        msr     cpsr_c, r1
+	ARM ( ldmia	r7!, {r13-r14} )
+	ldmia	r7!, {r6}
+	msr	spsr, r6
+
+        /* Restore the IRQ mode registers */
+        bic     r1, r0, #0x1f
+        orr     r1, r1, #0x12
+        msr     cpsr_c, r1
+	ARM ( ldmia	r7!, {r13-r14} )
+	ldmia	r7!, {r6}
+	msr	spsr, r6
+
+        /* Restore the FIQ mode registers */
+        bic     r1, r0, #0x1f
+        orr     r1, r1, #0x11
+        msr     cpsr_c, r1
+	ARM ( ldmia	r7!, {r8-r14} )
+	THUMB ( ldmia	r7!, {r8-r12} )
+	ldmia	r7!, {r6}
+	msr	spsr, r6
+
+        /* Return to the original mode */
+        msr     cpsr_c, r0
+#endif
 	mov	r0, #0			@ return zero on success
 	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_resume_after_mmu)
 
+
+#ifdef CONFIG_ARCH_M86XXX
+scratchpad_cpu_context_loc_dbg:
+        .word   SCRATCHPAD_CPU_CONTEXT_LOC_DBG
+#endif
+
 /*
  * Note: Yes, part of the following code is located into the .data section.
  *       This is to allow sleep_save_sp to be accessed with a relative load
diff --git a/arch/arm/mach-comcerto/Kconfig b/arch/arm/mach-comcerto/Kconfig
index 2906e5c..f94f77c 100644
--- a/arch/arm/mach-comcerto/Kconfig
+++ b/arch/arm/mach-comcerto/Kconfig
@@ -58,6 +58,12 @@
 	help
 	  Say Y here if you intend to run this kernel with a C2K EVM.
 
+config C2K_MFCN_EVM
+	bool "MFCN EVM"
+	depends on ARCH_M86XXX
+	help
+	  Say Y here if you intend to run this kernel with a C2K MFCN EVM.
+
 config C2K_ASIC
 	bool "ASIC"
 	depends on ARCH_M86XXX
diff --git a/arch/arm/mach-comcerto/Makefile b/arch/arm/mach-comcerto/Makefile
index c480855..9cc213a 100644
--- a/arch/arm/mach-comcerto/Makefile
+++ b/arch/arm/mach-comcerto/Makefile
@@ -4,7 +4,7 @@
 
 # Object file lists.
 
-obj-y := gpio.o time.o dma.o clock.o reset.o
+obj-y := gpio.o time.o dma.o clock.o reset.o pm.o sleep.o
 obj-m :=
 obj-n :=
 obj- :=
@@ -23,6 +23,7 @@
 obj-$(CONFIG_RTSM_C2K) 				+= board-c2krtsm.o
 obj-$(CONFIG_GOOGLE_FIBER_OPTIMUS)		+= board-optimus.o
 obj-$(CONFIG_C2K_EVM) 				+= board-c2kevm.o
+obj-$(CONFIG_C2K_MFCN_EVM)			+= board-c2kmfcnevm.o
 obj-$(CONFIG_C2K_ASIC) 				+= board-c2kasic.o
 obj-$(CONFIG_SMP)				+= platsmp.o headsmp.o
 obj-$(CONFIG_COMCERTO_MSP)			+= msp/
diff --git a/arch/arm/mach-comcerto/board-c2kasic.c b/arch/arm/mach-comcerto/board-c2kasic.c
index 5e6d726..9097b28 100644
--- a/arch/arm/mach-comcerto/board-c2kasic.c
+++ b/arch/arm/mach-comcerto/board-c2kasic.c
@@ -56,7 +56,6 @@
 #include <asm/mach/time.h>
 #include <mach/gpio.h>
 
-
 extern void platform_reserve(void);
 extern void device_map_io (void);
 extern void device_irq_init(void);
@@ -270,19 +269,17 @@
 		.platform_data = &spi_pdata,
                 .controller_data = &spi_ctrl_data,
 	},
-
 	{
 		/* FIXME: for chipselect-1 */
 		.modalias = "proslic",
-		.chip_select = 1,
 		.max_speed_hz = 4*1000*1000,
+		.chip_select = 1,
+		.mode = SPI_MODE_3,
 		.bus_num = 0,
 		.irq = -1,
-		.mode = SPI_MODE_3,
 		.platform_data = &spi_pdata,
                 .controller_data = &spi_ctrl_data,
 	},
-
 	{
 		.modalias = "comcerto_spi3",
 		.chip_select = 2,
@@ -294,6 +291,19 @@
                 .controller_data = &spi_ctrl_data,
 	},
 
+#if 0 //MSIF
+
+	{
+		.modalias = "proslic",
+		.max_speed_hz = 2*1000*1000,
+		.chip_select = 3,
+                .mode = SPI_MODE_1,
+		.bus_num = 0,
+		.irq = -1,
+		.platform_data = &spi_pdata,
+                .controller_data = &spi_ctrl_data,
+	},
+#else
 	{
 		.modalias = "legerity",
 		.chip_select = 3,
@@ -304,7 +314,7 @@
 		.platform_data = &spi_pdata,
                 .controller_data = &spi_ctrl_data,
 	},
-
+#endif
 };
 #endif
 
@@ -485,7 +495,8 @@
 	.fspolarity = 0, /* 28 FSYNC_FALL(RISE)_EDGE */
 	.fshwidth = 1, /* High_Phase_Width[10:0] */
 	.fslwidth = 0xFF, /* Low_Phase_Width[10:0] */
-	.clockhz = 2048000, /* INC_VALUE[29:0] According to the desired TDM clock output frequency, this field should be configured */
+	.clockhz = 2048000, /* INC_VALUE[29:0]: this field should be configured
+			according to the desired TDM clock output frequency */
 	.clockout = 1, /* 0 -> set bit 21, clear bit 20 in COMCERTO_GPIO_IOCTRL_REG
 			  (software control, clock input)
 			  1 -> set bit 21 and 20 in COMCERTO_GPIO_IOCTRL_REG
diff --git a/arch/arm/mach-comcerto/board-c2kevm.c b/arch/arm/mach-comcerto/board-c2kevm.c
index 9e898e8..a99dab7 100644
--- a/arch/arm/mach-comcerto/board-c2kevm.c
+++ b/arch/arm/mach-comcerto/board-c2kevm.c
@@ -259,19 +259,18 @@
 		.platform_data = &spi_pdata,
                 .controller_data = &spi_ctrl_data,
 	},
-
 	{
 		/* FIXME: for chipselect-1 */
 		.modalias = "proslic",
-		.chip_select = 1,
 		.max_speed_hz = 4*1000*1000,
+		.chip_select = 1,
+		.mode = SPI_MODE_3,
 		.bus_num = 0,
 		.irq = -1,
-		.mode = SPI_MODE_3,
 		.platform_data = &spi_pdata,
                 .controller_data = &spi_ctrl_data,
 	},
-
 	{
 		.modalias = "comcerto_spi3",
 		.chip_select = 2,
@@ -283,6 +282,20 @@
                 .controller_data = &spi_ctrl_data,
 	},
 
+#if 0 //MSIF
+
+	{
+		.modalias = "proslic",
+		.max_speed_hz = 2*1000*1000,
+		.chip_select = 3,
+		.mode = SPI_MODE_1,
+		.bus_num = 0,
+		.irq = -1,
+		.platform_data = &spi_pdata,
+                .controller_data = &spi_ctrl_data,
+	},
+#else
 	{
 		.modalias = "legerity",
 		.chip_select = 3,
@@ -293,7 +306,7 @@
 		.platform_data = &spi_pdata,
                 .controller_data = &spi_ctrl_data,
 	},
-
+#endif
 };
 #endif
 
@@ -473,7 +486,8 @@
 	.fspolarity = 0, /* 28 FSYNC_FALL(RISE)_EDGE */
 	.fshwidth = 1, /* High_Phase_Width[10:0] */
 	.fslwidth = 0xFF, /* Low_Phase_Width[10:0] */
-	.clockhz = 2048000, /* INC_VALUE[29:0] According to the desired TDM clock output frequency, this field should be configured */
+	.clockhz = 2048000, /* INC_VALUE[29:0]: this field should be configured
+			       according to the desired TDM clock output frequency */
 	.clockout = 1, /* 0 -> set bit 21, clear bit 20 in COMCERTO_GPIO_IOCTRL_REG
 			  (software control, clock input)
 			  1 -> set bit 21 and 20 in COMCERTO_GPIO_IOCTRL_REG
@@ -498,6 +512,40 @@
 	.resource = NULL,
 };
 
+#if defined(CONFIG_DSPG_DECT_CSS)
+#define CSS_ITCM_BASE		COMCERTO_AXI_DECT_BASE
+#define CSS_ITCM_SIZE		(SZ_1M)
+
+#define CSS_DTCM_BASE		(CSS_ITCM_BASE + CSS_ITCM_SIZE)
+#define CSS_DTCM_SIZE		(SZ_1M)
+
+static struct resource comcerto_css_resources[] = {
+	{
+		.name	= "itcm",
+		.start	= CSS_ITCM_BASE,
+		.end	= CSS_ITCM_BASE + CSS_ITCM_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.name	= "dtcm",
+		.start	= CSS_DTCM_BASE,
+		.end	= CSS_DTCM_BASE + CSS_DTCM_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device comcerto_css_device = {
+	.name		= "css",
+	.id		= 0,
+	.dev		= {
+		.platform_data = 0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+	.num_resources	= ARRAY_SIZE(comcerto_css_resources),
+	.resource	= comcerto_css_resources,
+};
+#endif
+
 static struct resource comcerto_pfe_resources[] = {
 	{
 		.name	= "apb",
@@ -631,6 +679,9 @@
 		&comcerto_tdm_device,
 		&comcerto_pfe_device,
 		&rtc_dev,
+#if defined(CONFIG_DSPG_DECT_CSS)
+		&comcerto_css_device,
+#endif
 #if defined(CONFIG_COMCERTO_ELP_SUPPORT)
 	&comcerto_elp_device,
 #endif
@@ -644,11 +695,11 @@
 /* This variable is used by comcerto-2000.c to initialize the expansion bus */
 int comcerto_exp_values[5][7]= {
 	/* ENABLE, BASE, SEG_SZ, CFG, TMG1, TMG2, TMG3 */
-	{1, (EXP_BUS_REG_BASE_CS0 >> 12), ((EXP_BUS_REG_BASE_CS0 + EXP_CS0_SEG_SIZE - 1) >> 12), EXP_MEM_BUS_SIZE_16, 0x1A1A401F, 0x06060A04, 0x00000002},		/*TODO Values to check*/
+	{1, (EXP_BUS_REG_BASE_CS0 >> 12), ((EXP_BUS_REG_BASE_CS0 + EXP_CS0_SEG_SIZE - 1) >> 12), EXP_MEM_BUS_SIZE_16, 0x03034007, 0x04040502, 0x00000002},		/*TODO Values to check*/
 	{0, (EXP_BUS_REG_BASE_CS1 >> 12), ((EXP_BUS_REG_BASE_CS1 + EXP_CS1_SEG_SIZE - 1) >> 12), EXP_RDY_EN|EXP_MEM_BUS_SIZE_32, 0x1A1A401F, 0x06060A04, 0x00000002},	/*TODO Values to check*/
 	{0, (EXP_BUS_REG_BASE_CS2 >> 12), ((EXP_BUS_REG_BASE_CS2 + EXP_CS2_SEG_SIZE - 1) >> 12), EXP_STRB_MODE|EXP_ALE_MODE|EXP_MEM_BUS_SIZE_8, 0x1A10201A, 0x03080403, 0x0000002},	/*TODO Values to check*/
 	{0, (EXP_BUS_REG_BASE_CS3 >> 12), ((EXP_BUS_REG_BASE_CS3 + EXP_CS3_SEG_SIZE - 1) >> 12), EXP_STRB_MODE|EXP_ALE_MODE|EXP_MEM_BUS_SIZE_8, 0x1A10201A, 0x03080403, 0x0000002},	/*BT8370*/
-	{0, (EXP_BUS_REG_BASE_CS4 >> 12), ((EXP_BUS_REG_BASE_CS4 + EXP_CS4_SEG_SIZE - 1) >> 12), EXP_NAND_MODE|EXP_MEM_BUS_SIZE_8, 0x1A1A401F, 0x06060A04, 0x00000002},	/* NAND: TODO Values to check */
+	{1, (EXP_BUS_REG_BASE_CS4 >> 12), ((EXP_BUS_REG_BASE_CS4 + EXP_CS4_SEG_SIZE - 1) >> 12), EXP_NAND_MODE|EXP_MEM_BUS_SIZE_8, 0x00000001, 0x01010001, 0x00000002},	/* NAND: TODO Values to check */
 };
 
 /************************************************************************
diff --git a/arch/arm/mach-comcerto/board-c2kmfcnevm.c b/arch/arm/mach-comcerto/board-c2kmfcnevm.c
new file mode 100644
index 0000000..c14cf44
--- /dev/null
+++ b/arch/arm/mach-comcerto/board-c2kmfcnevm.c
@@ -0,0 +1,743 @@
+/*
+ * arch/arm/mach-comcerto/board-c2kmfcnevm.c
+ *
+ *  Copyright (C) 2012 Mindspeed Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/serial_8250.h>
+#include <linux/memblock.h>
+#include <linux/phy.h>
+
+#include <linux/mtd/mtd.h>
+#if defined(CONFIG_MTD_NAND_COMCERTO) || defined(CONFIG_MTD_NAND_COMCERTO_MODULE)
+#include <linux/mtd/nand.h>
+#endif
+#include <linux/mtd/partitions.h>
+
+#if defined(CONFIG_SPI_MSPD_LOW_SPEED) || defined(CONFIG_SPI_MSPD_HIGH_SPEED)
+#include <linux/spi/spi.h>
+#endif
+
+#include <asm/sizes.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/io.h>
+
+#include <asm/mach/flash.h>
+#include <asm/mach/arch.h>
+
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/dma.h>
+#include <linux/dw_dmac.h>
+
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/smp_twd.h>
+#include <asm/localtimer.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/time.h>
+#include <mach/gpio.h>
+
+
+extern void platform_reserve(void);
+extern void device_map_io (void);
+extern void device_irq_init(void);
+extern void device_init(void);
+extern void mac_addr_init(struct comcerto_pfe_platform_data *);
+extern struct sys_timer comcerto_timer;
+
+static void __init board_gpio_init(void)
+{
+#ifdef CONFIG_COMCERTO_PFE_UART_SUPPORT
+	writel((readl(COMCERTO_GPIO_PIN_SELECT_REG) & ~PFE_UART_GPIO) | PFE_UART_BUS, COMCERTO_GPIO_PIN_SELECT_REG);
+	c2k_gpio_pin_stat.c2k_gpio_pins_0_31 |= PFE_UART_GPIO_PIN; /* GPIOs 12 & 13 are used for PFE_UART */
+#endif
+
+#if defined(CONFIG_SPI_MSPD_LOW_SPEED) || defined(CONFIG_SPI2_MSPD_LOW_SPEED)
+	/* enable SPI pins */
+	writel((readl(COMCERTO_GPIO_PIN_SELECT_REG1) & ~(SPI_MUX_GPIO_1)) | (SPI_MUX_BUS_1), COMCERTO_GPIO_PIN_SELECT_REG1);
+	writel((readl(COMCERTO_GPIO_63_32_PIN_SELECT) & ~(SPI_MUX_GPIO_2)) | (SPI_MUX_BUS_2), COMCERTO_GPIO_63_32_PIN_SELECT);
+	c2k_gpio_pin_stat.c2k_gpio_pins_0_31 |= SPI_MUX_GPIO_1_PIN; /* GPIOs 18,19, 21,22, 30,31 are used for SPI*/
+	c2k_gpio_pin_stat.c2k_gpio_pins_32_63 |= SPI_MUX_GPIO_2_PIN; /* GPIO 32 is used for SPI*/
+#endif
+
+#if defined(CONFIG_SPI_MSPD_HIGH_SPEED)
+	/* enable SPI pins */
+	writel((readl(COMCERTO_GPIO_PIN_SELECT_REG1) & ~(SPI_2_MUX_GPIO_1)) | (SPI_2_MUX_BUS_1), COMCERTO_GPIO_PIN_SELECT_REG1);
+	writel((readl(COMCERTO_GPIO_63_32_PIN_SELECT) & ~(SPI_2_MUX_GPIO_2)) | (SPI_2_MUX_BUS_2), COMCERTO_GPIO_63_32_PIN_SELECT);
+	c2k_gpio_pin_stat.c2k_gpio_pins_0_31 |= SPI_2_MUX_GPIO_1_PIN;
+	c2k_gpio_pin_stat.c2k_gpio_pins_32_63 |= SPI_2_MUX_GPIO_2_PIN;
+#endif
+
+#if defined(CONFIG_COMCERTO_I2C_SUPPORT)
+	writel((readl(COMCERTO_GPIO_PIN_SELECT_REG1) & ~I2C_GPIO) | I2C_BUS, COMCERTO_GPIO_PIN_SELECT_REG1);
+	c2k_gpio_pin_stat.c2k_gpio_pins_0_31 |= I2C_GPIO_PIN;
+#endif
+
+#if defined(CONFIG_MTD_NAND_COMCERTO) || defined(CONFIG_MTD_NAND_COMCERTO_MODULE)
+	writel((readl(COMCERTO_GPIO_PIN_SELECT_REG1) & ~NAND_GPIO) | NAND_BUS, COMCERTO_GPIO_PIN_SELECT_REG1);
+	c2k_gpio_pin_stat.c2k_gpio_pins_0_31 |= NAND_GPIO_PIN;
+#endif
+
+#if defined(CONFIG_MTD_COMCERTO_NOR)
+	writel((readl(COMCERTO_GPIO_PIN_SELECT_REG1) & ~NOR_GPIO) | NOR_BUS, COMCERTO_GPIO_PIN_SELECT_REG1);
+	c2k_gpio_pin_stat.c2k_gpio_pins_0_31 |= NOR_GPIO_PIN;
+#endif
+}
+
+/* --------------------------------------------------------------------
+ *  NOR device
+ * -------------------------------------------------------------------- */
+#if defined(CONFIG_MTD_COMCERTO_NOR)
+
+static struct resource comcerto_nor_resources[] = {
+	{
+		.start	= NORFLASH_MEMORY_PHY1,
+		.end	= NORFLASH_MEMORY_PHY1 + SZ_64M - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct flash_platform_data comcerto_nor_data = {
+	.map_name	= "cfi_probe",
+	.width	= 2,
+};
+
+static struct platform_device comcerto_nor = {
+	.name           = "comcertoflash",
+	.id             = 0,
+	.num_resources  = ARRAY_SIZE(comcerto_nor_resources),
+	.resource       = comcerto_nor_resources,
+	.dev = {
+		.platform_data	= &comcerto_nor_data,
+	},
+};
+#endif
+
+static struct resource rtc_res[] = {
+       {
+               .start = COMCERTO_APB_RTC_BASE,
+               .end = COMCERTO_APB_RTC_BASE + SZ_32 - 1,
+               .flags = IORESOURCE_MEM,
+       },
+       {
+               .start = IRQ_RTC_ALM,
+               .flags = IORESOURCE_IRQ,
+       },
+       {
+               .start = IRQ_RTC_PRI,
+               .flags = IORESOURCE_IRQ,
+       },
+};
+static struct platform_device rtc_dev = {
+       .name = "c2k-rtc",
+       .id = -1,
+       .num_resources = ARRAY_SIZE(rtc_res),
+       .resource = rtc_res,
+};
+
+/* --------------------------------------------------------------------
+ *  DMAC controller
+ * -------------------------------------------------------------------- */
+#if defined(CONFIG_COMCERTO_DW_DMA_SUPPORT)
+static struct resource dw_dmac_resource[] = {
+	{
+		.start          = DW_DMA_DMAC_BASEADDR,
+		.end            = DW_DMA_DMAC_BASEADDR + 0x2C0,
+		.flags          = IORESOURCE_MEM,
+	},
+	{
+		.start          = IRQ_DMAC,
+		.flags          = IORESOURCE_IRQ,
+	}
+};
+
+static struct dw_dma_platform_data dw_dmac_data = {
+	.nr_channels    = 8,
+};
+
+static u64 dw_dmac_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device dw_dmac_device = {
+	.name           = "dw_dmac",
+	.id             = 0,
+	.dev            = {
+		.dma_mask = &dw_dmac_dma_mask,
+		.platform_data  = &dw_dmac_data,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+	.resource       = dw_dmac_resource,
+	.num_resources  = ARRAY_SIZE(dw_dmac_resource),
+};
+#endif
+
+/* --------------------------------------------------------------------
+ *  NAND device
+ * -------------------------------------------------------------------- */
+#if defined(CONFIG_MTD_NAND_COMCERTO) || defined(CONFIG_MTD_NAND_COMCERTO_MODULE)
+static struct resource comcerto_nand_resources[] = {
+	{
+		.start	= COMCERTO_NAND_FIO_ADDR,
+		.end	= COMCERTO_NAND_FIO_ADDR + COMCERTO_NAND_IO_SZ - 1,
+		.flags	= IORESOURCE_MEM,
+	}
+};
+
+static struct platform_device comcerto_nand = {
+	.name		= "comcertonand",
+	.id		= -1,
+	.dev		= {
+				.platform_data	= NULL,
+	},
+	.resource	= comcerto_nand_resources,
+	.num_resources	= ARRAY_SIZE(comcerto_nand_resources),
+};
+#endif
+
+/* --------------------------------------------------------------------
+ *  SPI bus controller
+ * -------------------------------------------------------------------- */
+#if defined(CONFIG_SPI_MSPD_LOW_SPEED) || defined(CONFIG_SPI_MSPD_HIGH_SPEED)
+
+#define	CLK_NAME	10
+struct spi_controller_pdata {
+	int use_dma;
+	int num_chipselects;
+	int bus_num;
+	u32 max_freq;
+	char clk_name[CLK_NAME];
+};
+
+struct spi_platform_data {
+	int type;
+	int dummy;
+};
+
+struct spi_controller_data {
+        u8 poll_mode;   /* 0 for controller polling mode */
+        u8 type;        /* SPI/SSP/Microwire */
+        u8 enable_dma;
+        void (*cs_control)(u32 command);
+};
+
+struct spi_platform_data spi_pdata = {
+	.type = 0,
+	.dummy = 0,
+};
+
+struct spi_controller_data spi_ctrl_data =  {
+        .poll_mode = 1,
+};
+
+static struct spi_board_info comcerto_spi_board_info[] = {
+	{
+		/* FIXME: for chipselect-0 */
+		.modalias = "s25fl256s0",
+		.chip_select = 0,
+		.max_speed_hz = 4*1000*1000,
+		.bus_num = 0,
+		.irq = -1,
+		.mode = SPI_MODE_3,
+		.platform_data = &spi_pdata,
+                .controller_data = &spi_ctrl_data,
+	},
+	{
+		/* FIXME: for chipselect-1 */
+		.modalias = "proslic",
+		.max_speed_hz = 4*1000*1000,
+		.chip_select = 1,
+		.mode = SPI_MODE_3,
+		.bus_num = 0,
+		.irq = -1,
+		.platform_data = &spi_pdata,
+                .controller_data = &spi_ctrl_data,
+	},
+	{
+		.modalias = "comcerto_spi3",
+		.chip_select = 2,
+		.max_speed_hz = 4*1000*1000,
+		.bus_num = 0,
+		.irq = -1,
+		.mode = SPI_MODE_3,
+		.platform_data = &spi_pdata,
+                .controller_data = &spi_ctrl_data,
+	},
+
+#if 0 //MSIF
+
+	{
+		.modalias = "proslic",
+		.max_speed_hz = 2*1000*1000,
+		.chip_select = 3,
+		.mode = SPI_MODE_1,
+		.bus_num = 0,
+		.irq = -1,
+		.platform_data = &spi_pdata,
+                .controller_data = &spi_ctrl_data,
+	},
+#else
+	{
+		.modalias = "legerity",
+		.chip_select = 3,
+		.max_speed_hz = 4*1000*1000,
+		.bus_num = 0,
+		.irq = -1,
+		.mode = SPI_MODE_3,
+		.platform_data = &spi_pdata,
+                .controller_data = &spi_ctrl_data,
+	},
+#endif
+};
+#endif
+
+#if defined(CONFIG_SPI_MSPD_HIGH_SPEED)
+struct spi_controller_pdata fast_spi_pdata = {
+	.use_dma = 0,
+	.num_chipselects = 2,
+	.bus_num = 1,
+	.max_freq = 60 * 1000 * 1000,
+	.clk_name = "DUS",
+};
+#endif
+
+#if defined(CONFIG_SPI_MSPD_HIGH_SPEED) || defined(CONFIG_SPI2_MSPD_HIGH_SPEED)
+static struct resource comcerto_fast_spi_resource[] = {
+	{
+		.start  = COMCERTO_AXI_SPI_BASE,
+		.end    = COMCERTO_AXI_SPI_BASE + SZ_4K - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+	{
+		.start  = IRQ_SPI,
+		.flags  = IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device comcerto_fast_spi = {
+	.name = "comcerto_spi",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(comcerto_fast_spi_resource),
+	.resource = comcerto_fast_spi_resource,
+#if defined(CONFIG_SPI_MSPD_HIGH_SPEED)
+	.dev = {
+		.platform_data = &fast_spi_pdata,
+	},
+#endif
+};
+#endif
+
+#if defined(CONFIG_SPI_MSPD_LOW_SPEED)
+struct spi_controller_pdata ls_spi_pdata = {
+	.use_dma = 0,
+	.num_chipselects = 4,
+	.bus_num = 0,
+	.max_freq = 20 * 1000 * 1000,
+	.clk_name = "spi_i2c",
+};
+#endif
+
+#if defined(CONFIG_SPI_MSPD_LOW_SPEED) || defined(CONFIG_SPI2_MSPD_LOW_SPEED)
+static struct resource comcerto_spi_resource[] = {
+	{
+		.start  = COMCERTO_APB_SPI_BASE,
+		.end    = COMCERTO_APB_SPI_BASE + SZ_4K - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+	{
+		.start  = IRQ_SPI_LS,
+		.flags  = IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device comcerto_spi = {
+	.name = "comcerto_spi",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(comcerto_spi_resource),
+	.resource = comcerto_spi_resource,
+#if defined(CONFIG_SPI_MSPD_LOW_SPEED)
+	.dev = {
+		.platform_data = &ls_spi_pdata,
+	},
+#endif
+};
+#endif
+
+/* --------------------------------------------------------------------
+ *  I2C bus controller
+ * -------------------------------------------------------------------- */
+#if defined(CONFIG_COMCERTO_I2C_SUPPORT)
+static struct resource comcerto_i2c_resources[] = {
+	{
+		.start	= COMCERTO_APB_I2C_BASE,
+		.end	= COMCERTO_APB_I2C_BASE + SZ_4K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= IRQ_I2C,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device comcerto_i2c = {
+	.name           = "comcerto_i2c",
+	.id             = -1,
+	.num_resources  = ARRAY_SIZE(comcerto_i2c_resources),
+	.resource       = comcerto_i2c_resources,
+};
+#endif
+
+/* --------------------------------------------------------------------
+*  Watchdog
+* -------------------------------------------------------------------- */
+#ifdef CONFIG_MPCORE_WATCHDOG
+static struct resource comcerto_a9wd_resources[] = {
+	{
+		.start	= COMCERTO_TWD_BASE,
+		.end	= COMCERTO_TWD_BASE + 0xFF,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.name	= "mpcore_wdt",
+		.start	= IRQ_LOCALWDOG,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device comcerto_a9wd = {
+	.name		= "mpcore_wdt",
+	.id             = -1,
+	.num_resources  = ARRAY_SIZE(comcerto_a9wd_resources),
+	.resource       = comcerto_a9wd_resources,
+};
+#endif
+
+#ifdef CONFIG_COMCERTO_WATCHDOG
+static struct resource comcerto_wdt_resources[] = {
+	{
+		.start	= COMCERTO_APB_TIMER_BASE + 0xD0,
+		.end	= COMCERTO_APB_TIMER_BASE + 0xD8,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device comcerto_wdt = {
+        .name   = "comcerto_wdt",
+        .id     = -1,
+	.num_resources  = ARRAY_SIZE(comcerto_wdt_resources),
+	.resource       = comcerto_wdt_resources,
+};
+#endif
+
+#if defined(CONFIG_COMCERTO_ELP_SUPPORT)
+/* --------------------------------------------------------------------
+ *  IPsec
+ * -------------------------------------------------------------------- */
+static struct resource comcerto_elp_resources[] = {
+	{
+		.name   = "elp",
+		.start  = COMCERTO_AXI_SPACC_PDU_BASE,
+		.end    = COMCERTO_AXI_SPACC_PDU_BASE + SZ_16M  - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+	{
+		.name   = "irq_spacc",
+		.start  = IRQ_SPACC,
+		.end    = IRQ_SPACC,
+		.flags  = IORESOURCE_IRQ,
+	}
+};
+
+static u64 comcerto_elp_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device  comcerto_elp_device = {
+	.name                   = "Elliptic-EPN1802",
+	.id                     = 0,
+	.num_resources          = 2,
+	.resource               = comcerto_elp_resources,
+	.dev = {
+		.dma_mask               = &comcerto_elp_dma_mask,
+		.coherent_dma_mask      = DMA_BIT_MASK(32),
+	},
+};
+#endif
+
+static struct comcerto_tdm_data comcerto_tdm_pdata = {
+	.fsoutput = 1, /* Generic Pad Control and Version ID Register[2] */
+	.fspolarity = 0, /* 28 FSYNC_FALL(RISE)_EDGE */
+	.fshwidth = 1, /* High_Phase_Width[10:0] */
+	.fslwidth = 0xFF, /* Low_Phase_Width[10:0] */
+	.clockhz = 2048000, /* INC_VALUE[29:0]: this field should be configured
+			       according to the desired TDM clock output frequency */
+	.clockout = 1, /* 0 -> set bit 21, clear bit 20 in COMCERTO_GPIO_IOCTRL_REG
+			  (software control, clock input)
+			  1 -> set bit 21 and 20 in COMCERTO_GPIO_IOCTRL_REG
+			  (software control, clock output)
+			  2 -> clear bit 21 in COMCERTO_GPIO_IOCTRL_REG (hardware control) */
+	.tdmmux = 0x1, /* TDM interface muxing: 0x0 - TDM block, 0x1 - ZDS block,
+		0x2 - GPIO[63:60] signals, 0x3 - MSIF block */
+#if 0
+	/* FIX ME - Need correct values for TDM_DR, TDM_DX, TDM_FS and TDM_CK */
+	.tdmck = 0x3F,
+	.tdmfs = 0x3F,
+	.tdmdx = 0x3F,
+	.tdmdr = 0x3F,
+#endif
+};
+
+static struct platform_device comcerto_tdm_device = {
+	.name	= "comcerto-tdm",
+	.id		= 0,
+	.dev.platform_data = &comcerto_tdm_pdata,
+	.num_resources	= 0,
+	.resource = NULL,
+};
+
+#if defined(CONFIG_DSPG_DECT_CSS)
+#define CSS_ITCM_BASE		COMCERTO_AXI_DECT_BASE
+#define CSS_ITCM_SIZE		(SZ_1M)
+
+#define CSS_DTCM_BASE		(CSS_ITCM_BASE + CSS_ITCM_SIZE)
+#define CSS_DTCM_SIZE		(SZ_1M)
+
+static struct resource comcerto_css_resources[] = {
+	{
+		.name	= "itcm",
+		.start	= CSS_ITCM_BASE,
+		.end	= CSS_ITCM_BASE + CSS_ITCM_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.name	= "dtcm",
+		.start	= CSS_DTCM_BASE,
+		.end	= CSS_DTCM_BASE + CSS_DTCM_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device comcerto_css_device = {
+	.name		= "css",
+	.id		= 0,
+	.dev		= {
+		.platform_data = 0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+	.num_resources	= ARRAY_SIZE(comcerto_css_resources),
+	.resource	= comcerto_css_resources,
+};
+#endif
+
+static struct resource comcerto_pfe_resources[] = {
+	{
+		.name	= "apb",
+		.start  = COMCERTO_APB_PFE_BASE,
+		.end    = COMCERTO_APB_PFE_BASE + COMCERTO_APB_PFE_SIZE - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+	{
+		.name	= "axi",
+		.start  = COMCERTO_AXI_PFE_BASE,
+		.end    = COMCERTO_AXI_PFE_BASE + COMCERTO_AXI_PFE_SIZE - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+	{
+		.name	= "ddr",
+		.start  = COMCERTO_PFE_DDR_BASE,
+		.end	= COMCERTO_PFE_DDR_BASE + COMCERTO_PFE_DDR_SIZE - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+	{
+		.name	= "iram",
+		.start  = COMCERTO_PFE_IRAM_BASE,
+		.end	= COMCERTO_PFE_IRAM_BASE + COMCERTO_PFE_IRAM_SIZE - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+        {
+                .name   = "ipsec",
+                .start  = COMCERTO_AXI_IPSEC_BASE,
+                .end    = COMCERTO_AXI_IPSEC_BASE + COMCERTO_AXI_IPSEC_SIZE - 1,
+                .flags  = IORESOURCE_MEM,
+        },
+
+	{
+		.name	= "hif",
+		.start  = IRQ_PFE_HIF,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct comcerto_pfe_platform_data comcerto_pfe_pdata = {
+	.comcerto_eth_pdata[0] = {
+		.name = GEM0_ITF_NAME,
+		.device_flags = CONFIG_COMCERTO_GEMAC,
+		.mii_config = CONFIG_COMCERTO_USE_RGMII,
+		.gemac_mode = GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_1G,
+		.phy_flags = GEMAC_PHY_RGMII_ADD_DELAY,
+		.bus_id = 0,
+		.phy_id = 4,
+		.gem_id = 0,
+		.mac_addr = (u8[])GEM0_MAC,
+	},
+
+	.comcerto_eth_pdata[1] = {
+		.name = GEM1_ITF_NAME,
+		.device_flags = CONFIG_COMCERTO_GEMAC,
+		.mii_config = CONFIG_COMCERTO_USE_RGMII,
+		.gemac_mode = GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_1G,
+		.phy_flags = GEMAC_NO_PHY,
+		.gem_id = 1,
+		.mac_addr = (u8[])GEM1_MAC,
+	},
+
+	.comcerto_eth_pdata[2] = {
+		.name = GEM2_ITF_NAME,
+		.device_flags = CONFIG_COMCERTO_GEMAC,
+		.mii_config = CONFIG_COMCERTO_USE_RGMII,
+		.gemac_mode = GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_1G,
+		.phy_flags = GEMAC_NO_PHY,
+		.gem_id = 2,
+		.mac_addr = (u8[])GEM2_MAC,
+	},
+
+	/**
+	 * There is a single MDIO bus coming out of the C2K, and it is the one
+	 * connected to GEM0. All PHYs and switches are connected to the same
+	 * bus using different addresses. Typically .bus_id is always 0; only
+	 * .phy_id changes between the comcerto_eth_pdata[] structures above.
+	 */
+	.comcerto_mdio_pdata[0] = {
+		.enabled = 1,
+		.phy_mask = 0xFFFFFFEF,
+		.mdc_div = 96,
+		.irq = {
+			[4] = PHY_POLL,
+		},
+	},
+};
+
+static u64 comcerto_pfe_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device comcerto_pfe_device = {
+	.name		= "pfe",
+	.id		= 0,
+	.dev		= {
+		.platform_data		= &comcerto_pfe_pdata,
+		.dma_mask		= &comcerto_pfe_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+	},
+	.num_resources	= ARRAY_SIZE(comcerto_pfe_resources),
+	.resource	= comcerto_pfe_resources,
+};
+
+static struct platform_device *comcerto_devices[] __initdata = {
+#if defined(CONFIG_MTD_NAND_COMCERTO) || defined(CONFIG_MTD_NAND_COMCERTO_MODULE)
+		&comcerto_nand,
+#endif
+#if defined(CONFIG_MTD_COMCERTO_NOR)
+		&comcerto_nor,
+#endif
+#if defined(CONFIG_COMCERTO_I2C_SUPPORT)
+		&comcerto_i2c,
+#endif
+
+#if defined (CONFIG_MPCORE_WATCHDOG)
+		&comcerto_a9wd,
+#endif
+
+#if defined(CONFIG_COMCERTO_WATCHDOG)
+		&comcerto_wdt,
+#endif
+
+#if defined(CONFIG_SPI_MSPD_HIGH_SPEED) || defined(CONFIG_SPI2_MSPD_HIGH_SPEED)
+		&comcerto_fast_spi,
+#endif
+#if defined(CONFIG_SPI_MSPD_LOW_SPEED) || defined(CONFIG_SPI2_MSPD_LOW_SPEED)
+		&comcerto_spi,
+#endif
+#if defined(CONFIG_COMCERTO_DW_DMA_SUPPORT)
+		&dw_dmac_device,
+#endif
+		&comcerto_tdm_device,
+		&comcerto_pfe_device,
+		&rtc_dev,
+#if defined(CONFIG_DSPG_DECT_CSS)
+		&comcerto_css_device,
+#endif
+#if defined(CONFIG_COMCERTO_ELP_SUPPORT)
+	&comcerto_elp_device,
+#endif
+};
+
+
+/************************************************************************
+ *  Expansion bus
+ *
+ ************************************************************************/
+/* This variable is used by comcerto-2000.c to initialize the expansion bus */
+int comcerto_exp_values[5][7]= {
+	/* ENABLE, BASE, SEG_SZ, CFG, TMG1, TMG2, TMG3 */
+	{1, (EXP_BUS_REG_BASE_CS0 >> 12), ((EXP_BUS_REG_BASE_CS0 + EXP_CS0_SEG_SIZE - 1) >> 12), EXP_MEM_BUS_SIZE_16, 0x03034007, 0x04040502, 0x00000002},		/*TODO Values to check*/
+	{0, (EXP_BUS_REG_BASE_CS1 >> 12), ((EXP_BUS_REG_BASE_CS1 + EXP_CS1_SEG_SIZE - 1) >> 12), EXP_RDY_EN|EXP_MEM_BUS_SIZE_32, 0x1A1A401F, 0x06060A04, 0x00000002},	/*TODO Values to check*/
+	{0, (EXP_BUS_REG_BASE_CS2 >> 12), ((EXP_BUS_REG_BASE_CS2 + EXP_CS2_SEG_SIZE - 1) >> 12), EXP_STRB_MODE|EXP_ALE_MODE|EXP_MEM_BUS_SIZE_8, 0x1A10201A, 0x03080403, 0x00000002},	/*TODO Values to check*/
+	{0, (EXP_BUS_REG_BASE_CS3 >> 12), ((EXP_BUS_REG_BASE_CS3 + EXP_CS3_SEG_SIZE - 1) >> 12), EXP_STRB_MODE|EXP_ALE_MODE|EXP_MEM_BUS_SIZE_8, 0x1A10201A, 0x03080403, 0x00000002},	/*BT8370*/
+	{1, (EXP_BUS_REG_BASE_CS4 >> 12), ((EXP_BUS_REG_BASE_CS4 + EXP_CS4_SEG_SIZE - 1) >> 12), EXP_NAND_MODE|EXP_MEM_BUS_SIZE_8, 0x00000001, 0x01010001, 0x00000002},	/* NAND: TODO Values to check */
+};
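Each row follows the {ENABLE, BASE, SEG_SZ, CFG, TMG1, TMG2, TMG3} layout noted above, with the base and segment-end values stored in 4 KiB units. A minimal sketch of how the table can be walked is shown here; exp_dump() is a hypothetical debug helper, not part of this patch.

static void __init exp_dump(void)
{
	int cs;

	for (cs = 0; cs < 5; cs++) {
		if (!comcerto_exp_values[cs][0])
			continue;	/* ENABLE flag clear: chip-select unused */

		pr_info("EXP CS%d: window 0x%08x-0x%08x, cfg 0x%08x\n", cs,
			comcerto_exp_values[cs][1] << 12,	/* BASE, restored from 4K units */
			(comcerto_exp_values[cs][2] << 12) | 0xfff,
			comcerto_exp_values[cs][3]);
	}
}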
+
+/************************************************************************
+ *  Machine definition
+ *
+ ************************************************************************/
+static void __init platform_map_io(void)
+{
+	device_map_io();
+}
+
+static void __init platform_irq_init(void)
+{
+	device_irq_init();
+}
+
+static void __init platform_init(void)
+{
+	device_init();
+	board_gpio_init();
+
+#if defined(CONFIG_SPI_MSPD_LOW_SPEED) || defined(CONFIG_SPI_MSPD_HIGH_SPEED)
+	spi_register_board_info(comcerto_spi_board_info, ARRAY_SIZE(comcerto_spi_board_info));
+#endif
+	mac_addr_init(&comcerto_pfe_pdata);
+
+	platform_add_devices(comcerto_devices, ARRAY_SIZE(comcerto_devices));
+}
+
+MACHINE_START(COMCERTO, "Comcerto 2000 MFCN EVM")
+	/* Mindspeed Technologies Inc. */
+	.atag_offset    = COMCERTO_AXI_DDR_BASE + 0x100,
+	.reserve	= platform_reserve,
+	.map_io		= platform_map_io,
+	.init_irq	= platform_irq_init,
+	.init_machine	= platform_init,
+	.timer		= &comcerto_timer,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_32M + 3*SZ_4M,
+#endif
+MACHINE_END
diff --git a/arch/arm/mach-comcerto/comcerto-2000.c b/arch/arm/mach-comcerto/comcerto-2000.c
index edff778..9ce93ed 100644
--- a/arch/arm/mach-comcerto/comcerto-2000.c
+++ b/arch/arm/mach-comcerto/comcerto-2000.c
@@ -57,6 +57,7 @@
 #include <linux/clockchips.h>
 #include <linux/clk.h>
 #include <mach/comcerto-2000/clock.h>
+#include <mach/comcerto-2000/pm.h>
 #include <mach/gpio.h>
 
 struct c2k_gpio_pin_stat_info c2k_gpio_pin_stat =
@@ -154,7 +155,12 @@
 	},
 };
 
-#define PFE_DMA_SIZE		SZ_4M
+#if defined(CONFIG_COMCERTO_64K_PAGES)
+#define PFE_DMA_SIZE		(4 * SZ_1M)
+#else
+#define PFE_DMA_SIZE		(16 * SZ_1M)
+#endif
+
 #define DSPG_DECT_CSS_DMA_SIZE	(10 * SZ_1M)
 
 void __init device_map_io(void)
@@ -423,6 +429,9 @@
         u32 val;
 	int ref_clk_24;
 
+	/* Move SATA controller to DDRC2 port */
+	writel(readl(COMCERTO_GPIO_FABRIC_CTRL_REG) | 0x2, COMCERTO_GPIO_FABRIC_CTRL_REG);
+
 	val = readl(COMCERTO_GPIO_SYSTEM_CONFIG);
 	ref_clk_24 = val & (BIT_5_MSK|BIT_7_MSK);
 
@@ -902,6 +911,8 @@
 
 void __init device_init(void)
 {
+	/* Default value for the bit mask */
+	unsigned int default_host_utilpe_shared_bitmask = ~(USB2p0_IRQ|WOL_IRQ);
 	struct clk *axi_clk,*ddr_clk,*arm_clk,*l2cc_clk;
 	HAL_clk_div_backup_relocate_table ();
 	system_rev = (readl(COMCERTO_GPIO_DEVICE_ID_REG) >> 24) & 0xf;
@@ -985,6 +996,8 @@
 	// [FIXME] Take TDM out of reset
 	//writel(readl(COMCERTO_BLOCK_RESET_REG) | TDM_RST, COMCERTO_BLOCK_RESET_REG);
 #endif
+	/* The default bit mask is applied here; it will be passed to the Util-PE */
+	c2k_pm_bitmask_store(default_host_utilpe_shared_bitmask);
 
 	platform_add_devices(comcerto_common_devices, ARRAY_SIZE(comcerto_common_devices));
 }
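The default mask built in device_init() above clears only the USB 2.0 and WOL bits, i.e. those two blocks remain powered as wake-up sources. A hedged sketch of widening that set follows; adding the RTC alarm is illustrative only and assumes the call is made in the same place as the default store above.

	/* Keep USB 2.0, WOL and additionally the RTC alarm as wake-up sources
	 * (a cleared bit means the block stays powered across suspend). */
	unsigned int wake_mask = ~(USB2p0_IRQ | WOL_IRQ | RTC_ALM_IRQ);

	c2k_pm_bitmask_store(wake_mask);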
diff --git a/arch/arm/mach-comcerto/include/mach/board-c2kmfcnevm.h b/arch/arm/mach-comcerto/include/mach/board-c2kmfcnevm.h
new file mode 100644
index 0000000..2c52c91
--- /dev/null
+++ b/arch/arm/mach-comcerto/include/mach/board-c2kmfcnevm.h
@@ -0,0 +1,54 @@
+/*
+ * arch/arm/mach-comcerto/include/mach/board-c2kmfcnevm.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __BOARD_C2KMFCNEVM_H__
+#define __BOARD_C2KMFCNEVM_H__
+
+#include <mach/hardware.h>
+	
+	/***********************************
+	 * Expansion bus configuration
+	 ***********************************/
+	 
+	#define COMCERTO_EXPCLK		50000000	/* 50MHz */
+
+	/***********************************
+	 * GPIO
+	 ***********************************/
+	#define COMCERTO_OUTPUT_GPIO		(COMCERTO_NAND_CE)
+	#define COMCERTO_IRQ_RISING_EDGE_GPIO	0 // [FIXME]
+	#define COMCERTO_IRQ_FALLING_EDGE_GPIO	(GPIO_2 | GPIO_0) // [FIXME]
+	#define COMCERTO_IRQ_LEVEL_GPIO 	GPIO_2 // [FIXME]
+	/* Pins used either as GPIO or by other IP blocks */
+	#define COMCERTO_GPIO_PIN_USAGE		(SPI_BUS) // [FIXME]
+
+	/***********************************
+	 * EEPROM
+	 ***********************************/
+
+	/***********************************
+	 * NOR
+	 ***********************************/
+	#define NORFLASH_MEMORY_PHY1		EXP_CS0_AXI_BASEADDR
+
+	/***********************************
+	 * NAND
+	 ***********************************/
+	#define COMCERTO_EXP_CS4_SEG_SZ		1
+
+	#define COMCERTO_NAND_FIO_ADDR		EXP_CS4_AXI_BASEADDR
+	#define COMCERTO_NAND_BR		0x20000000 /* BR is on GPIO_29 */
+	#define COMCERTO_NAND_CE		0x10000000 /* CE is on GPIO_28 */
+	#define COMCERTO_NAND_IO_SZ		((COMCERTO_EXP_CS4_SEG_SZ << 12) +0x1000)
+
+	/***********************************
+	 * SLIC
+	 ***********************************/
+	#define COMCERTO_SLIC_GPIO_IRQ		IRQ_G2
+
+#endif
diff --git a/arch/arm/mach-comcerto/include/mach/c2k_dma.h b/arch/arm/mach-comcerto/include/mach/c2k_dma.h
index b1dd18d..942ac7f 100644
--- a/arch/arm/mach-comcerto/include/mach/c2k_dma.h
+++ b/arch/arm/mach-comcerto/include/mach/c2k_dma.h
@@ -36,9 +36,25 @@
 #define BLAST	(1 << 16)
 #define BFIX	(1 << 17)
 
-#define MDMA_INBOUND_BUF_DESC	256
+// Block Size
+#define XOR_BLOCK_SIZE_256	0
+#define XOR_BLOCK_SIZE_512	1
+#define XOR_BLOCK_SIZE_1024	2
+#define XOR_BLOCK_SIZE_2048	3
+#define XOR_BLOCK_SIZE_4096	4
+
+#define COMCERTO_XOR_MAX_SRC    6
+
+#define MDMA_INBOUND_BUF_DESC		256
 #define MDMA_OUTBOUND_BUF_DESC	256
 
+#define XOR_INBOUND_BUF_DESC	6
+#define XOR_OUTBOUND_BUF_DESC	2
+
+/* FLEN => the maximum number of fdescs the MDMA can process at a time is 4k-1 */
+/* Need to verify whether that many can be created in the aram_pool; it is unclear whether anyone else uses the iram_pool */
+//#define XOR_FDESC_COUNT	256
+
 #define MDMA_MAX_BUF_SIZE		0xffff
 #define MDMA_SPLIT_BUF_SIZE		0x8000	/* half a page with 64kB pages */
 
@@ -72,18 +88,39 @@
 #define ARPROT(x)			((x) << 4)
 #define ARCACHE(x)			((x) << 0)
 
+enum mdma_transaction_type {
+	MDMA_MEMCPY,
+	MDMA_XOR,
+	MDMA_XOR_VAL,
+};
 
-struct comcerto_xor_buffer_desc {
+struct comcerto_mdma_buffer_desc {
 	u32 bpointer;
 	u32 bcontrol;
 }__attribute__ ((aligned(8)));
 
+struct comcerto_memcpy_inbound_fdesc {
+	u32  next_desc;
+	u32  fcontrol;
+	u32  fstatus0;
+	u32  fstatus1;
+	struct comcerto_mdma_buffer_desc bdesc[MDMA_INBOUND_BUF_DESC];
+}__attribute__ ((aligned(16)));
+
+struct comcerto_memcpy_outbound_fdesc {
+	u32  next_desc;
+	u32  fcontrol;
+	u32  fstatus0;
+	u32  fstatus1;
+	struct comcerto_mdma_buffer_desc bdesc[MDMA_OUTBOUND_BUF_DESC];
+}__attribute__ ((aligned(16)));
+
 struct comcerto_xor_inbound_fdesc {
 	u32  next_desc;
 	u32  fcontrol;
 	u32  fstatus0;
 	u32  fstatus1;
-	struct comcerto_xor_buffer_desc bdesc[MDMA_INBOUND_BUF_DESC];
+	struct comcerto_mdma_buffer_desc bdesc[XOR_INBOUND_BUF_DESC];
 }__attribute__ ((aligned(16)));
 
 struct comcerto_xor_outbound_fdesc {
@@ -91,7 +128,7 @@
 	u32  fcontrol;
 	u32  fstatus0;
 	u32  fstatus1;
-	struct comcerto_xor_buffer_desc bdesc[MDMA_OUTBOUND_BUF_DESC];
+	struct comcerto_mdma_buffer_desc bdesc[XOR_OUTBOUND_BUF_DESC];
 }__attribute__ ((aligned(16)));
 
 struct comcerto_dma_buf {
@@ -113,9 +150,20 @@
 	struct comcerto_dma_buf out_bdesc[MDMA_OUTBOUND_BUF_DESC];
 };
 
+struct mdma_xor_struct {
+	int transaction_type;
+	int xor_block_size;
+	int xor_src_cnt;
+	dma_addr_t **xor_srcs;
+	dma_addr_t *xor_dest;
+};
 
-extern struct comcerto_xor_inbound_fdesc *mdma_in_desc;
-extern struct comcerto_xor_outbound_fdesc *mdma_out_desc;
+
+extern struct comcerto_memcpy_inbound_fdesc *mdma_in_desc;
+extern struct comcerto_memcpy_outbound_fdesc *mdma_out_desc;
+
+extern struct comcerto_xor_inbound_fdesc *xor_in_fdesc[];
+extern struct comcerto_xor_outbound_fdesc *xor_out_fdesc[];
 
 
 static inline void comcerto_dma_set_in_bdesc(u32 idx, u32 addr, u32 ctrl)
@@ -140,15 +188,42 @@
 	mdma_out_desc->bdesc[idx].bcontrol |= ctrl;
 }
 
+/****************** XOR functions ********************/
+static inline void mdma_xor_set_in_bdesc(u32 xor_cbuf_wr_cntr, u32 idx, u32 addr, u32 ctrl)
+{
+	xor_in_fdesc[xor_cbuf_wr_cntr]->bdesc[idx].bpointer = addr;
+	xor_in_fdesc[xor_cbuf_wr_cntr]->bdesc[idx].bcontrol = ctrl;
+}
+
+static inline void mdma_xor_set_out_bdesc(u32 xor_cbuf_wr_cntr, u32 idx, u32 addr, u32 ctrl)
+{
+	xor_out_fdesc[xor_cbuf_wr_cntr]->bdesc[idx].bpointer = addr;
+	xor_out_fdesc[xor_cbuf_wr_cntr]->bdesc[idx].bcontrol = ctrl;
+}
+
+static inline void mdma_xor_in_bdesc_ctrl_update(u32 xor_cbuf_wr_cntr,u32 idx, u32 ctrl)
+{
+	xor_in_fdesc[xor_cbuf_wr_cntr]->bdesc[idx].bcontrol |= ctrl;
+}
+
+static inline void mdma_xor_out_bdesc_ctrl_update(u32 xor_cbuf_wr_cntr,u32 idx, u32 ctrl)
+{
+	xor_out_fdesc[xor_cbuf_wr_cntr]->bdesc[idx].bcontrol |= ctrl;
+}
+
+/****************** XOR functions end ********************/
+
 extern void comcerto_dma_get(void);
 extern void comcerto_dma_put(void);
 extern void comcerto_dma_set_in_bdesc(u32 idx, u32 addr, u32 ctrl);
 extern void comcerto_dma_set_out_bdesc(u32 idx, u32 addr, u32 ctrl);
 extern void comcerto_dma_start(void);
 extern void comcerto_dma_wait(void);
+extern void comcerto_do_mdma_xor(unsigned int src_count, unsigned int bytes, dma_addr_t dest, dma_addr_t *srcs);
+extern void comcerto_do_mdma_memcpy(void);
 
-int comcerto_dma_sg_add_input(struct comcerto_dma_sg *sg, struct page *page, unsigned int offset, unsigned int len, int use_acp);
-int comcerto_dma_sg_add_output(struct comcerto_dma_sg *sg, struct page *page, unsigned int offset, unsigned int len, int use_acp);
+int comcerto_dma_sg_add_input(struct comcerto_dma_sg *sg, void *p, unsigned int len, int use_acp);
+int comcerto_dma_sg_add_output(struct comcerto_dma_sg *sg, void *p, unsigned int len, int use_acp);
 void comcerto_dma_sg_setup(struct comcerto_dma_sg *sg, unsigned int len);
 void comcerto_dma_sg_cleanup(struct comcerto_dma_sg *sg, unsigned int len);
 
@@ -163,3 +238,4 @@
 #endif
 
 #endif /* C2K_DMA_H_ */
+
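A hedged usage sketch for the new comcerto_do_mdma_xor() entry point declared above. The surrounding driver code is assumed: `dev` stands for the MDMA/PFE platform device, the three buffers are kernel allocations of at least XOR_BUF_SZ bytes, DMA mapping error checks are omitted for brevity, and the call is assumed to complete synchronously (the actual completion model lives in the C file, which is not shown here).

#include <linux/dma-mapping.h>
#include <mach/c2k_dma.h>

#define XOR_BUF_SZ	4096	/* matches XOR_BLOCK_SIZE_4096 */

static void xor_two_buffers(struct device *dev, void *src0, void *src1, void *dst)
{
	dma_addr_t srcs[2], dest;

	srcs[0] = dma_map_single(dev, src0, XOR_BUF_SZ, DMA_TO_DEVICE);
	srcs[1] = dma_map_single(dev, src1, XOR_BUF_SZ, DMA_TO_DEVICE);
	dest    = dma_map_single(dev, dst,  XOR_BUF_SZ, DMA_FROM_DEVICE);

	comcerto_do_mdma_xor(2, XOR_BUF_SZ, dest, srcs);	/* dst = src0 ^ src1 */

	dma_unmap_single(dev, srcs[0], XOR_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(dev, srcs[1], XOR_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(dev, dest,    XOR_BUF_SZ, DMA_FROM_DEVICE);
}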
diff --git a/arch/arm/mach-comcerto/include/mach/comcerto-2000/gpio.h b/arch/arm/mach-comcerto/include/mach/comcerto-2000/gpio.h
index 24e5982..764b176 100644
--- a/arch/arm/mach-comcerto/include/mach/comcerto-2000/gpio.h
+++ b/arch/arm/mach-comcerto/include/mach/comcerto-2000/gpio.h
@@ -122,6 +122,25 @@
 #define GPIO_1_S	0x00000002
 #define GPIO_2_S	0x00000004
 
+/* GPIO Pin Number and Description */
+#define GPIO_PIN_NUM_0		0
+#define GPIO_PIN_NUM_1		1
+#define GPIO_PIN_NUM_2		2
+#define GPIO_PIN_NUM_3		3
+#define GPIO_PIN_NUM_4		4
+#define GPIO_PIN_NUM_5		5
+
+#define GPIO_PIN_DESC_0		"gpio-0"
+#define GPIO_PIN_DESC_1		"gpio-1"
+#define GPIO_PIN_DESC_2		"gpio-2"
+#define GPIO_PIN_DESC_3		"gpio-3"
+#define GPIO_PIN_DESC_4		"gpio-4"
+#define GPIO_PIN_DESC_5		"gpio-5"
+
+#define	GPIO_SET_0	0
+#define	GPIO_SET_1	1
+
+
 /* GPIO Pin Mask */
 #define GPIO_PIN_0		(0x1 << 0)
 #define GPIO_PIN_1		(0x1 << 1)
diff --git a/arch/arm/mach-comcerto/include/mach/comcerto-2000/pm.h b/arch/arm/mach-comcerto/include/mach/comcerto-2000/pm.h
new file mode 100644
index 0000000..80f2102
--- /dev/null
+++ b/arch/arm/mach-comcerto/include/mach/comcerto-2000/pm.h
@@ -0,0 +1,72 @@
+/*
+ * pm.h
+ *
+ * Power management driver for the Comcerto C2K device - internal header file
+ *
+ * This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef __ARCH_ARM_C2K_PM_H__
+#define __ARCH_ARM_C2K_PM_H__
+
+/* PMU Interrupt masks */
+#define GPIO0_IRQ               (1 << 0)
+#define GPIO1_IRQ               (1 << 1)
+#define GPIO2_IRQ               (1 << 2)
+#define GPIO3_IRQ               (1 << 3)
+
+#define GPIO4_IRQ               (1 << 4)
+#define GPIO5_IRQ               (1 << 5)
+#define GPIO6_IRQ               (1 << 6)
+#define GPIO7_IRQ               (1 << 7)
+
+#define TIMER0_IRQ              (1 << 8)
+#define TIMER1_IRQ              (1 << 9)
+#define TIMER2_IRQ              (1 << 10)
+#define TIMER3_IRQ              (1 << 11)
+
+#define ZDS_MSIF_IRQ            (1 << 12)
+#define RTC_ALM_IRQ             (1 << 13)
+#define RTC_PRI_IRQ             (1 << 14)
+#define PCIe0_IRQ               (1 << 15)
+
+#define PCIe1_IRQ               (1 << 16)
+#define SATA_IRQ                (1 << 17)
+#define SATA_MSI_IRQ            (1 << 18)
+#define USB2p0_IRQ              (1 << 19)
+
+#define USB3p0_IRQ              (1 << 20)
+#define HFE_0_IRQ               (1 << 21)
+#define WOL_IRQ                 (1 << 22)
+#define CSS_IRQ                 (1 << 23)
+
+#define DUS_DMAC_IRQ            (1 << 24)
+#define DUS_UART0_IRQ           (1 << 25)
+#define DUS_UART0UARTS2_IRQ     (1 << 26)
+#define HFE_1_IRQ               (1 << 27)
+
+#define USB3p0_PM               (1 << 28)
+#define PTP0_IRQ                (1 << 29)
+#define PTP1_IRQ                (1 << 30)
+#define PTP2_IRQ                (1 << 31)
+
+#define JUMP_TO_RESUME_1		        0xe3a00020 	/* mov	r0, #32 */
+#define JUMP_TO_RESUME_2		        0xe590f000 	/* ldr	pc, [r0] */
+
+/*
+ * Two locations in iRAM are shared between the Host and the UtilPE:
+ * one for installing the suspend event and the return/resume location,
+ * and a second to pass the bitmask.
+ */
+#define HOST_UTILPE_SHARED_ADDRESS_OFF  0x2400 /* Offset into the iRAM */
+#define HOST_UTILPE_SHARED_ADDRESS      (IRAM_MEMORY_VADDR+HOST_UTILPE_SHARED_ADDRESS_OFF)
+
+/* Global Variable for Shared Util-PE interrupt Mask */
+extern unsigned host_utilpe_shared_pmu_bitmask;
+
+void c2k_pm_bitmask_store(unsigned int);
+unsigned int c2k_pm_bitmask_show(void);
+
+#endif
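The convention used by the drivers later in this patch (PCIe, SATA) is that a cleared bit in host_utilpe_shared_pmu_bitmask marks the block as a wake-up source, so its suspend handler returns early. A minimal sketch of that pattern for a hypothetical driver:

static int foo_suspend(struct device *dev)
{
	/* SATA bit cleared: the controller stays powered as a wake source */
	if (!(host_utilpe_shared_pmu_bitmask & SATA_IRQ))
		return 0;

	/* ... normal power-down path ... */
	return 0;
}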
diff --git a/arch/arm/mach-comcerto/include/mach/ecc.h b/arch/arm/mach-comcerto/include/mach/ecc.h
index 661a6b9..f82a611 100644
--- a/arch/arm/mach-comcerto/include/mach/ecc.h
+++ b/arch/arm/mach-comcerto/include/mach/ecc.h
@@ -64,12 +64,12 @@
 #define ECC_LVL_30 0x1E
 #define ECC_LVL_32 0x20
 
-/* ECC Level 8 is used */
-/* #define ECC_LVL_VAL ECC_LVL_8 */
-
-/* ECC Level 24 is used */
-#define ECC_LVL_VAL ECC_LVL_24
-
+#if defined (CONFIG_NAND_COMCERTO_ECC_24_HW_BCH)
+	#define ECC_LVL_VAL ECC_LVL_24 /* ECC Level 24 is used */
+#elif defined (CONFIG_NAND_COMCERTO_ECC_8_HW_BCH)
+	#define ECC_LVL_VAL ECC_LVL_8 /* ECC Level 8 is used */
+#endif
+ 
 /* Block size used in Bytes*/
 #define ECC_BLOCK_SIZE_512 512
 #define ECC_BLOCK_SIZE_1024 1024
diff --git a/arch/arm/mach-comcerto/include/mach/hardware.h b/arch/arm/mach-comcerto/include/mach/hardware.h
index d054691..b86a3f1 100644
--- a/arch/arm/mach-comcerto/include/mach/hardware.h
+++ b/arch/arm/mach-comcerto/include/mach/hardware.h
@@ -38,6 +38,8 @@
 		#include <mach/board-optimus.h>
 	#elif defined(CONFIG_C2K_EVM)
 		#include <mach/board-c2kevm.h>
+	#elif defined(CONFIG_C2K_MFCN_EVM)
+		#include <mach/board-c2kmfcnevm.h>
 	#elif defined(CONFIG_RTSM_C2K)
 		#include <mach/board-c2krtsm.h>
 	
diff --git a/arch/arm/mach-comcerto/include/mach/reset.h b/arch/arm/mach-comcerto/include/mach/reset.h
index fd9c73c..16045ea 100644
--- a/arch/arm/mach-comcerto/include/mach/reset.h
+++ b/arch/arm/mach-comcerto/include/mach/reset.h
@@ -110,4 +110,21 @@
 extern void c2000_block_reset(int block,int state);
 extern void reset_init(void);
 
+#if defined(CONFIG_C2K_MFCN_EVM)
+/* C2000 board components that are put into, or taken
+ * out of, reset via GPIO.
+ */
+typedef enum {
+	COMPONENT_ATHEROS_SWITCH=0,
+	COMPONENT_SLIC,
+	COMPONENT_PCIE0,
+	COMPONENT_PCIE1,
+	COMPONENT_USB_HUB,
+	COMPONENT_EXP_DAUGTHER_CARD,
+	COMPONENT_RGMII0,
+	COMPONENT_RGMII1
+}C2000_GEN2_GPIO_RESET_COMPONENT;
+
+void GPIO_reset_external_device(int block,int state);
+#endif
 #endif
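A hedged usage sketch of the new GPIO_reset_external_device() helper; per the convention in reset.c, state 1 asserts reset and state 0 releases it. The 1 ms settle time is illustrative, mirroring what pcie-c2000.c does for the PCIe slots later in this patch.

#if defined(CONFIG_C2K_MFCN_EVM)
	GPIO_reset_external_device(COMPONENT_SLIC, 1);	/* hold the SLIC in reset */
	mdelay(1);
	GPIO_reset_external_device(COMPONENT_SLIC, 0);	/* release it */
#endif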
diff --git a/arch/arm/mach-comcerto/pcie-c2000.c b/arch/arm/mach-comcerto/pcie-c2000.c
index 60ebba9..e31d010 100644
--- a/arch/arm/mach-comcerto/pcie-c2000.c
+++ b/arch/arm/mach-comcerto/pcie-c2000.c
@@ -46,6 +46,7 @@
 
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <mach/comcerto-2000/pm.h>
 //#define COMCERTO_PCIE_DEBUG
 
 #ifdef CONFIG_PCI_MSI
@@ -286,6 +287,13 @@
 	writel_relaxed(val1, va_address);
 }
 
+static inline void nop_delay(void)
+{
+	int k;
+
+	for (k = 0; k < 1000; k++)
+		nop();
+}
+
 static int comcerto_pcie_rd_conf(struct pcie_port *pp, int bus_nr,
 		u32 devfn, int where, int size, u32 *val)
 {
@@ -318,6 +326,15 @@
 
 
 	*val = readl_relaxed(address);
+
+	/*
+	 * Because the external abort is imprecise, the processor cannot tell
+	 * exactly which instruction caused it. When the abort handler restores
+	 * the PC to "the next instruction", it is therefore often wrong, and a
+	 * few instructions after the readl_relaxed() that aborted get skipped.
+	 * The nop instructions added here ensure that anything skipped is only
+	 * a nop.
+	 */
+	nop_delay();
+
 	if (size == 1)
 		*val = (*val >> (8 * (where & 3))) & 0xff;
 	else if (size == 2)
@@ -564,10 +581,15 @@
 static void handle_msi(struct pcie_port *pp)
 {
 	unsigned long val, mask;
-	unsigned int pos = 0, mask0;
+	unsigned int pos, mask0;
+
 
 	val = readl_relaxed(pp->va_dbi_base + PCIE_MSI_INTR0_STATUS);
 
+continue_handle:
+
+	pos = 0;
+
 	while (val) {
 		mask0 = 1 << pos;
 
@@ -580,13 +602,18 @@
 			mask = readl_relaxed(pp->va_dbi_base + PCIE_MSI_INTR0_ENABLE);
 			writel_relaxed(mask & ~mask0, pp->va_dbi_base + PCIE_MSI_INTR0_ENABLE);
 			writel_relaxed(mask0, pp->va_dbi_base + PCIE_MSI_INTR0_STATUS);
-			writel_relaxed(mask & mask0, pp->va_dbi_base + PCIE_MSI_INTR0_ENABLE);
+			writel_relaxed(mask, pp->va_dbi_base + PCIE_MSI_INTR0_ENABLE);
 			spin_unlock(&pp->intr_lock);
 			generic_handle_irq(pp->msi_base	+ pos);
 			val = val & ~mask0;
 		}
 		pos++;
 	}
+
+	val = readl_relaxed(pp->va_dbi_base + PCIE_MSI_INTR0_STATUS);
+	if(val)
+		goto continue_handle;
+
 #if 0
 	for (i = 0; i < (PCIE_NUM_MSI_IRQS >> 5); i++) {
 		val = readl_relaxed(pp->va_dbi_base + PCIE_MSI_INTR0_STATUS + (i * 12));
@@ -1113,12 +1140,12 @@
 		val &= ~(0xFF);
 		val |= 0xF1;
 		comcerto_dbi_write_reg(pp, PCIE_G2CTRL_REG, 4, val);
-	}
 
-	// instruct pcie to switch to gen2 after init
-        comcerto_dbi_read_reg(pp, PCIE_G2CTRL_REG, 4, &val);
-        val |= (1 << 17);
-        comcerto_dbi_write_reg(pp, PCIE_G2CTRL_REG, 4, val);
+		// instruct pcie to switch to gen2 after init
+		comcerto_dbi_read_reg(pp, PCIE_G2CTRL_REG, 4, &val);
+		val |= (1 << 17);
+		comcerto_dbi_write_reg(pp, PCIE_G2CTRL_REG, 4, val);
+	}
 
 	/*setup iATU for outbound translation */
 	PCIE_SETUP_iATU_OB_ENTRY( pp, iATU_ENTRY_MEM, iATU_GET_MEM_BASE(pp->remote_mem_baseaddr),
@@ -1408,7 +1435,7 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
         struct pcie_port *pp = &pcie_port[pdev->id];
-	int reset = 0;
+	int reset = 0, rc;
 
 	if (!pcie_port_is_host(pdev->id) ||  !(pp->link_state))
 			return count;
@@ -1451,12 +1478,25 @@
 			udelay(1000);
 		}
 
+		printk(KERN_INFO "Disabling PCIe%d Controller Clock\n", pdev->id);
+
+		if (pcie_port[pdev->id].port_mode != PCIE_PORT_MODE_NONE)
+			clk_disable(pcie_port[pdev->id].ref_clock);
+
 		printk(KERN_INFO "EXIT : Putting PCIe%d device into reset\n", pdev->id);
 	}
 	else {
 
 		printk(KERN_INFO "ENTER: Bringing PCIe%d device outof reset\n", pdev->id);
 
+		printk(KERN_INFO "Enabling PCIe%d Controller Clock\n", pdev->id);
+
+		if(pcie_port[pdev->id].port_mode != PCIE_PORT_MODE_NONE) {
+			rc = clk_enable(pcie_port[pdev->id].ref_clock);
+			if (rc)
+				pr_err("%s: PCIe%d clock enable failed\n", __func__, pdev->id);
+		}
+
 		if (!comcerto_pcie_device_reset_exit(pp)) {
 			pp->reset = 0;
 		}
@@ -1467,58 +1507,147 @@
 
 static DEVICE_ATTR(device_reset, 0644, comcerto_pcie_show_reset, comcerto_pcie_set_reset);
 
-#ifdef CONFIG_PM
-static int comcerto_pcie_suspend(struct platform_device *pdev, pm_message_t state)
+
+static ssize_t comcerto_pcie_serdes_pd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
-	unsigned int val, i;
+	struct platform_device *pdev = to_platform_device(dev);
+	int reset = 0;
 
-	printk(KERN_INFO "%s: pcie device %p (id = %d): state %d\n", 
-		__func__, pdev, pdev->id, state.event);
+	sscanf(buf, "%d", &reset);
 
-	if (pcie_port[pdev->id].port_mode != PCIE_PORT_MODE_NONE) {
-		if (comcerto_pcie_link_up(&pcie_port[pdev->id])){
+	reset = reset ? 1:0;
 
-		    /* Enable PME to root Port */
-		    comcerto_dbi_read_reg(&pcie_port[pdev->id], (PCI_CAP_PM + PCI_PM_CTRL), 4, &val);
-		    comcerto_dbi_write_reg(&pcie_port[pdev->id], (PCI_CAP_PM + PCI_PM_CTRL), 4, val | PCI_PM_CTRL_STATE_MASK);
+	if (reset) {
+		printk(KERN_INFO "%s: Putting Serdes to Low Power and CMU Power Off\n", __func__);
 
-			/* Required PM Delay */
-		    for (i = 0 ; i < 40 ; i++)
-			    udelay(500);
-	    }
+		if (pdev->id)
+			writel(readl(USBPHY_SERDES_STAT_BASE+0x44) | ((0x3 << 2)|(0x1 << 7)) , USBPHY_SERDES_STAT_BASE+0x44);
+		else
+			writel(readl(USBPHY_SERDES_STAT_BASE+0x34) | ((0x3 << 2)|(0x1 << 7)) , USBPHY_SERDES_STAT_BASE+0x34);
+	} else {
+		printk(KERN_INFO "%s: Getting Serdes out of Low Power and CMU Power On\n", __func__);
+
+		if (pdev->id)
+			writel(readl(USBPHY_SERDES_STAT_BASE+0x44) & ~((0x3 << 2)|(0x1 << 7)) , USBPHY_SERDES_STAT_BASE+0x44);
+		else
+			writel(readl(USBPHY_SERDES_STAT_BASE+0x34) & ~((0x3 << 2)|(0x1 << 7)) , USBPHY_SERDES_STAT_BASE+0x34);
 	}
 
+	return count;
+}
+
+static DEVICE_ATTR(serdes_pd, 0644, NULL, comcerto_pcie_serdes_pd);
+
+
+#ifdef CONFIG_PM
+static int no_irq_resume;
+
+static int comcerto_pcie_suspend(struct device *dev)
+{
+	unsigned int val, i;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	printk(KERN_INFO "%s: pcie device %p (id = %d):\n",
+			 __func__, pdev, pdev->id);
+
+	/* Check the PCIe bits in the bitmask: if PCIe is enabled as a wake-up
+	 * source (its bit cleared), do not suspend the device, since it is the
+	 * one that will wake the system from suspend.
+	 */
+	if (!(host_utilpe_shared_pmu_bitmask & PCIe0_IRQ) || !(host_utilpe_shared_pmu_bitmask & PCIe1_IRQ)) {
+		/* Just return from here */
+		return 0;
+	}
+	if (pcie_port[pdev->id].port_mode != PCIE_PORT_MODE_NONE) {
+		if (comcerto_pcie_link_up(&pcie_port[pdev->id])){
+			/* Enable PME to root Port */
+			comcerto_dbi_read_reg(&pcie_port[pdev->id], (PCI_CAP_PM + PCI_PM_CTRL), 4, &val);
+			comcerto_dbi_write_reg(&pcie_port[pdev->id], (PCI_CAP_PM + PCI_PM_CTRL), 4, val | PCI_PM_CTRL_STATE_MASK);
+			/* Required PM Delay */
+			for (i = 0 ; i < 40 ; i++)
+				udelay(500);
+		}
+	}
+	no_irq_resume = 0;
 	return 0;
 }
 
-static int comcerto_pcie_resume(struct platform_device *pdev)
+static int comcerto_pcie_resume(struct device *dev)
 {
 	unsigned int val, i;
+	struct platform_device *pdev = to_platform_device(dev);
 
 	printk(KERN_INFO "%s: pcie device %p (id = %d)\n", 
 		__func__, pdev, pdev->id);
+	/* Check the PCIe bits in the bitmask: if PCIe is enabled as a wake-up
+	 * source (its bit cleared), the device was not suspended, so there is
+	 * nothing to resume here.
+	 */
+	if (!(host_utilpe_shared_pmu_bitmask & PCIe0_IRQ) || !(host_utilpe_shared_pmu_bitmask & PCIe1_IRQ)) {
 
-	if(pcie_port[pdev->id].port_mode != PCIE_PORT_MODE_NONE) {
-		if (comcerto_pcie_link_up(&pcie_port[pdev->id])){
-
-		    /* Put In D0 State */
-		    comcerto_dbi_read_reg(&pcie_port[pdev->id], (PCI_CAP_PM + PCI_PM_CTRL), 4, &val);
-		    comcerto_dbi_write_reg(&pcie_port[pdev->id], (PCI_CAP_PM + PCI_PM_CTRL), 4, val & (~PCI_PM_CTRL_STATE_MASK));
-
-			/* Required PM Delay */
-		    for (i = 0 ; i < 40 ; i++)
-			    udelay(500);
-	    }
+		/* Just return */
+		return 0;
 	}
+	if (no_irq_resume == 0) {
+		if(pcie_port[pdev->id].port_mode != PCIE_PORT_MODE_NONE) {
+			if (comcerto_pcie_link_up(&pcie_port[pdev->id])){
+		    		/* Put In D0 State */
+		    		comcerto_dbi_read_reg(&pcie_port[pdev->id], (PCI_CAP_PM + PCI_PM_CTRL), 4, &val);
+		    		comcerto_dbi_write_reg(&pcie_port[pdev->id], (PCI_CAP_PM + PCI_PM_CTRL), 4, val & (~PCI_PM_CTRL_STATE_MASK));
 
+				/* Required PM Delay */
+		    		for (i = 0 ; i < 40 ; i++)
+			    		udelay(500);
+	    		}	
+		}	
+	}
 	return 0;
 }
 
-static struct platform_driver comcerto_pcie_driver = {
+static int comcerto_pcie_noirq_resume(struct device *dev)
+{
+	int val,i;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	printk(KERN_INFO "%s: pcie device %p (id = %d)\n",
+			 __func__, pdev, pdev->id);
+
+	/* Check the PCIe bits in the bitmask: if PCIe is enabled as a wake-up
+	 * source (its bit cleared), the device was not suspended, so there is
+	 * nothing to resume here.
+	 */
+	if (!(host_utilpe_shared_pmu_bitmask & PCIe0_IRQ) || !(host_utilpe_shared_pmu_bitmask & PCIe1_IRQ)) {
+		/* Just return */
+		return 0;
+	}
+	if(pcie_port[pdev->id].port_mode != PCIE_PORT_MODE_NONE) {
+		if (comcerto_pcie_link_up(&pcie_port[pdev->id])){
+			/* Put In D0 State */
+			comcerto_dbi_read_reg(&pcie_port[pdev->id], (PCI_CAP_PM + PCI_PM_CTRL), 4, &val);
+			comcerto_dbi_write_reg(&pcie_port[pdev->id], (PCI_CAP_PM + PCI_PM_CTRL), 4, val & (~PCI_PM_CTRL_STATE_MASK));
+			/* Required PM Delay */
+			for (i = 0 ; i < 40 ; i++)
+				udelay(500);
+	    	}
+	}	
+	return 0;
+}
+
+static const struct dev_pm_ops pcie_platform_pm_ops = {
 	.suspend = comcerto_pcie_suspend,
 	.resume = comcerto_pcie_resume,
+	.resume_noirq = comcerto_pcie_noirq_resume,
+};
+
+
+static struct platform_driver comcerto_pcie_driver = {
 	.driver = {
 		.name = "pcie",
+		.pm   = &pcie_platform_pm_ops,
 		.owner = THIS_MODULE,
 	},
 };
@@ -1617,6 +1746,15 @@
 
 	mdelay(1); //After CMU locks wait for sometime
 
+#if defined(CONFIG_C2K_MFCN_EVM)
+	if(nr == 0){
+		GPIO_reset_external_device(COMPONENT_PCIE0,0);
+	}else{
+		GPIO_reset_external_device(COMPONENT_PCIE1,0);
+	}
+
+	mdelay(1);
+#endif
 	//Bring PCIe out of reset
 	c2000_block_reset(pcie_component,0);
 
@@ -1670,6 +1808,7 @@
 	int polarity;
 	int ret;
 
+
 	if (nr >= NUM_PCIE_PORTS) {
 		printk("%s : Invalid PCIe port number\n", __func__);
 		goto err0;
@@ -1740,6 +1879,13 @@
 
 }
 
+static int comcerto_pcie_abort_handler(unsigned long addr, unsigned int fsr,
+				       struct pt_regs *regs)
+{
+	if (fsr & (1 << 10))
+		regs->ARM_pc += 4;
+	return 0;
+}
 
 
 static int __init comcerto_pcie_init(void)
@@ -1767,6 +1913,8 @@
 	pcibios_min_mem = COMCERTO_AXI_PCIe0_SLAVE_BASE;
 	pci_add_flags(PCI_REASSIGN_ALL_RSRC);
 
+	hook_fault_code(16 + 6, comcerto_pcie_abort_handler, SIGBUS, 0, "imprecise external abort");
+
 	pci_common_init(&comcerto_pcie);
 
 	for ( i = 0; i < num_pcie_port; i++ )
@@ -1792,11 +1940,17 @@
 	if (device_create_file(&pcie_pwr0.dev, &dev_attr_device_reset))
 		printk(KERN_ERR "%s: Unable to create pcie0 reset sysfs entry\n", __func__);
 
+	if (device_create_file(&pcie_pwr0.dev, &dev_attr_serdes_pd))
+		printk(KERN_ERR "%s: Unable to create pcie0 serdes_pd sysfs entry\n", __func__);
+
 	if(num_pcie_port > 1) {
 		platform_device_register(&pcie_pwr1);
 
 		if (device_create_file(&pcie_pwr1.dev, &dev_attr_device_reset))
 			printk(KERN_ERR "%s: Unable to create pcie1 reset sysfs entry\n", __func__);
+
+		if (device_create_file(&pcie_pwr1.dev, &dev_attr_serdes_pd))
+			printk(KERN_ERR "%s: Unable to create pcie1 serdes_pd sysfs entry\n", __func__);
 	}
 
 	platform_driver_register(&comcerto_pcie_driver);
diff --git a/arch/arm/mach-comcerto/pm.c b/arch/arm/mach-comcerto/pm.c
new file mode 100644
index 0000000..128ee9d
--- /dev/null
+++ b/arch/arm/mach-comcerto/pm.c
@@ -0,0 +1,408 @@
+/*
+ * arch/arm/mach-comcerto/pm.c
+ * C2K Power Management
+ *
+ * Copyright (C) 2012 Mindspeed
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/suspend.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/suspend.h>
+#include <asm/irq.h>
+#include <linux/atomic.h>
+#include <asm/mach/time.h>
+#include <asm/mach/irq.h>
+#include <linux/console.h>
+
+
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+
+#include <asm/cacheflush.h>
+#include <mach/hardware.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach-types.h>
+#include <asm/smp_scu.h>
+#include <mach/comcerto-2000/pm.h>
+#include <linux/gpio.h>
+
+unsigned int host_utilpe_shared_pmu_bitmask;
+
+/* Externs */
+extern void comcerto_cpu_suspend(int save_state);
+extern	unsigned int * c2k_get_restore_pointer(void);
+extern void comcerto_cpu_restore (void);
+
+unsigned int c2k_pm_bitmask_show(void)
+{
+	return host_utilpe_shared_pmu_bitmask;
+}
+
+void c2k_pm_bitmask_store(unsigned int bitmask_value)
+{
+	/*
+	 * Initialize the shared PMU bitmask.
+	 * This information is configurable at run time and could also be
+	 * passed in from the bootloader (not implemented yet).
+	 */
+	//host_utilpe_shared_pmu_bitmask = 0xFFE7FFFF;
+	host_utilpe_shared_pmu_bitmask = bitmask_value;
+
+	/* Pass the bitmask info to UtilPE */
+	*(((volatile unsigned int *)(HOST_UTILPE_SHARED_ADDRESS))+4) = host_utilpe_shared_pmu_bitmask;
+}
+
+
+static int comcerto_do_sram_idle(unsigned long save_state)
+{
+	comcerto_cpu_suspend(save_state);
+	return 0;
+}
+
+/*------------------------- L2 Cache and SCU Save Resume ----------------------------*/
+#define SCU_DATA_SIZE                32
+#define L2_DATA_SIZE                 96
+
+extern void pl310_save(void);
+extern void pl310_resume(void);
+
+typedef struct
+{
+    /* 0x00 */  volatile unsigned int control;
+    /* 0x04 */  const unsigned int configuration;
+    /* 0x08 */  union
+                {
+                    volatile unsigned int w;
+                    volatile unsigned char b[4];
+                } power_status;
+    /* 0x0c */  volatile unsigned int invalidate_all;
+                char padding1[48];
+    /* 0x40 */  volatile unsigned int filtering_start;
+    /* 0x44 */  volatile unsigned int filtering_end;
+                char padding2[8];
+    /* 0x50 */  volatile unsigned int access_control;
+    /* 0x54 */  volatile unsigned int ns_access_control;
+} a9_scu_registers;
+
+
+void save_a9_scu(u32 *pointer, unsigned scu_address)
+{
+    a9_scu_registers *scu = (a9_scu_registers *)scu_address;
+
+    pointer[0] = scu->control;
+    pointer[1] = scu->power_status.w;
+    pointer[2] = scu->filtering_start;
+    pointer[3] = scu->filtering_end;
+    pointer[4] = scu->access_control;
+    pointer[5] = scu->ns_access_control;
+}
+
+void restore_a9_scu(u32 *pointer, unsigned scu_address)
+{
+    a9_scu_registers *scu = (a9_scu_registers *)scu_address;
+
+    scu->invalidate_all = 0xffff;
+    scu->filtering_start = pointer[2];
+    scu->filtering_end = pointer[3];
+    scu->access_control = pointer[4];
+    scu->ns_access_control = pointer[5];
+    scu->power_status.w = pointer[1];
+    scu->control = pointer[0];
+}
+
+
+struct lockdown_regs
+{
+    unsigned int d, i;
+};
+
+typedef struct
+{
+    /* 0x000 */ const unsigned cache_id;
+    /* 0x004 */ const unsigned cache_type;
+                char padding1[0x0F8];
+    /* 0x100 */ volatile unsigned control;
+    /* 0x104 */ volatile unsigned aux_control;
+    /* 0x108 */ volatile unsigned tag_ram_control;
+    /* 0x10C */ volatile unsigned data_ram_control;
+                char padding2[0x0F0];
+    /* 0x200 */ volatile unsigned ev_counter_ctrl;
+    /* 0x204 */ volatile unsigned ev_counter1_cfg;
+    /* 0x208 */ volatile unsigned ev_counter0_cfg;
+    /* 0x20C */ volatile unsigned ev_counter1;
+    /* 0x210 */ volatile unsigned ev_counter0;
+    /* 0x214 */ volatile unsigned int_mask;
+    /* 0x218 */ const volatile unsigned int_mask_status;
+    /* 0x21C */ const volatile unsigned int_raw_status;
+    /* 0x220 */ volatile unsigned int_clear;
+                char padding3[0x50C];
+    /* 0x730 */ volatile unsigned cache_sync;
+                char padding4[0x03C];
+    /* 0x770 */ volatile unsigned inv_pa;
+                char padding5[0x008];
+    /* 0x77C */ volatile unsigned inv_way;
+                char padding6[0x030];
+    /* 0x7B0 */ volatile unsigned clean_pa;
+                char padding7[0x004];
+    /* 0x7B8 */ volatile unsigned clean_index;
+    /* 0x7BC */ volatile unsigned clean_way;
+                char padding8[0x030];
+    /* 0x7F0 */ volatile unsigned clean_inv_pa;
+                char padding9[0x004];
+    /* 0x7F8 */ volatile unsigned clean_inv_index;
+    /* 0x7FC */ volatile unsigned clean_inv_way;
+                char paddinga[0x100];
+    /* 0x900 */ volatile struct lockdown_regs lockdown[8];
+                char paddingb[0x010];
+    /* 0x950 */ volatile unsigned lock_line_en;
+    /* 0x954 */ volatile unsigned unlock_way;
+                char paddingc[0x2A8];
+    /* 0xC00 */ volatile unsigned addr_filtering_start;
+    /* 0xC04 */ volatile unsigned addr_filtering_end;
+                char paddingd[0x338];
+    /* 0xF40 */ volatile unsigned debug_ctrl;
+                char paddinge[0x01C];
+    /* 0xF60 */ volatile unsigned prefetch_ctrl;
+                char paddingf[0x01C];
+    /* 0xF80 */ volatile unsigned power_ctrl;
+} pl310_registers;
+
+
+typedef struct
+{
+    unsigned int aux_control;
+    unsigned int tag_ram_control;
+    unsigned int data_ram_control;
+    unsigned int ev_counter_ctrl;
+    unsigned int ev_counter1_cfg;
+    unsigned int ev_counter0_cfg;
+    unsigned int ev_counter1;
+    unsigned int ev_counter0;
+    unsigned int int_mask;
+    unsigned int lock_line_en;
+    struct lockdown_regs lockdown[8];
+    unsigned int unlock_way;
+    unsigned int addr_filtering_start;
+    unsigned int addr_filtering_end;
+    unsigned int debug_ctrl;
+    unsigned int prefetch_ctrl;
+    unsigned int power_ctrl;
+} pl310_context;
+
+
+void save_pl310(u32 *pointer, unsigned int pl310_address)
+{
+    pl310_registers *pl310 = (pl310_registers *)pl310_address;
+    pl310_context *context = (pl310_context *)pointer;
+    int i;
+
+    /* TODO: are all these registers present in earlier PL310 versions? */
+    context->aux_control = pl310->aux_control;
+    context->tag_ram_control = pl310->tag_ram_control;
+    context->data_ram_control = pl310->data_ram_control;
+    context->ev_counter_ctrl = pl310->ev_counter_ctrl;
+    context->ev_counter1_cfg = pl310->ev_counter1_cfg;
+    context->ev_counter0_cfg = pl310->ev_counter0_cfg;
+    context->ev_counter1 = pl310->ev_counter1;
+    context->ev_counter0 = pl310->ev_counter0;
+    context->int_mask = pl310->int_mask;
+    context->lock_line_en = pl310->lock_line_en;
+
+    for (i=0; i<8; ++i)
+    {
+        context->lockdown[i].d = pl310->lockdown[i].d;
+        context->lockdown[i].i = pl310->lockdown[i].i;
+    }
+    context->addr_filtering_start = pl310->addr_filtering_start;
+    context->addr_filtering_end = pl310->addr_filtering_end;
+    context->debug_ctrl = pl310->debug_ctrl;
+    context->prefetch_ctrl = pl310->prefetch_ctrl;
+    context->power_ctrl = pl310->power_ctrl;
+}
+
+void restore_pl310(u32 *pointer, unsigned int pl310_address)
+{
+    pl310_registers *pl310 = (pl310_registers *)pl310_address;
+    pl310_context *context = (pl310_context *)pointer;
+    int i;
+
+    /* We may need to disable the PL310 if the boot code has turned it on */
+    if (pl310->control)
+    {
+        /* Wait for the cache to be idle, then disable */
+        pl310->cache_sync = 0;
+        dsb();
+        pl310->control = 0;
+    }
+
+    /* TODO: are all these registers present in earlier PL310 versions? */
+    pl310->aux_control = context->aux_control;
+    pl310->tag_ram_control = context->tag_ram_control;
+    pl310->data_ram_control = context->data_ram_control;
+    pl310->ev_counter_ctrl = context->ev_counter_ctrl;
+    pl310->ev_counter1_cfg = context->ev_counter1_cfg;
+    pl310->ev_counter0_cfg = context->ev_counter0_cfg;
+    pl310->ev_counter1 = context->ev_counter1;
+    pl310->ev_counter0 = context->ev_counter0;
+    pl310->int_mask = context->int_mask;
+    pl310->lock_line_en = context->lock_line_en;
+    for (i=0; i<8; ++i)
+    {
+        pl310->lockdown[i].d = context->lockdown[i].d;
+        pl310->lockdown[i].i= context->lockdown[i].i;
+    }
+    pl310->addr_filtering_start = context->addr_filtering_start;
+    pl310->addr_filtering_end = context->addr_filtering_end;
+    pl310->debug_ctrl = context->debug_ctrl;
+    pl310->prefetch_ctrl = context->prefetch_ctrl;
+    pl310->power_ctrl = context->power_ctrl;
+    dsb();
+    pl310->control = 1;
+    dsb();
+}
+
+/*------------------------- L2 Cache and SCU Save Resume ----------------------------*/
+
+static void C2k_pm_suspend(void)
+{
+	/*
+	 * save_state tells comcerto_cpu_suspend() what needs to be saved and
+	 * restored:
+	 *   0 => nothing to save and restore
+	 *   1 => only L1 and logic lost
+	 *   2 => only L2 lost
+	 *   3 => L1, L2 and logic lost
+	 */
+	int save_state = 3;
+	unsigned int * p0;
+
+	unsigned int scu_data[SCU_DATA_SIZE];
+	unsigned int pl310_data[L2_DATA_SIZE];
+
+
+	printk(KERN_INFO "PM: C2000 Device is trying to enter Suspend mode ...\n");
+
+	p0 = (unsigned int *) comcerto_cpu_restore;
+
+	__raw_writel(virt_to_phys((unsigned int)p0), phys_to_virt(0x20));
+	__raw_writel((unsigned int)JUMP_TO_RESUME_1 , phys_to_virt(0x00));
+	__raw_writel((unsigned int)JUMP_TO_RESUME_2 , phys_to_virt(0x04));
+	smp_wmb();
+	__cpuc_flush_dcache_area((void *)phys_to_virt(0x00), 0x24);
+	outer_clean_range(__pa(phys_to_virt(0x00)), __pa(phys_to_virt(0x24)));
+
+	printk(KERN_INFO "PM: C2000 Jump Location Installed ... -- 0x%x  -- 0x%x  -- 0x%x \n", (unsigned int)p0, (unsigned int)comcerto_cpu_restore, virt_to_phys((unsigned int)p0));
+
+	printk(KERN_INFO "PM: Saving SCU Context ...\n");
+	save_a9_scu(&scu_data[0], (unsigned int *)COMCERTO_SCU_VADDR);
+
+	printk(KERN_INFO "PM: Saving L2 Cache Context ...\n");
+	save_pl310(&pl310_data[0], (unsigned int *)COMCERTO_L310_VADDR);
+
+	/* Pass the bitmask information to the uTilPE */
+	*(((volatile unsigned int *)(HOST_UTILPE_SHARED_ADDRESS))+4) = host_utilpe_shared_pmu_bitmask;
+
+	printk(KERN_INFO "PM: Going to Suspend ...\n");
+
+	cpu_suspend(save_state, comcerto_do_sram_idle);
+
+	restore_a9_scu(&scu_data[0], (unsigned int *)COMCERTO_SCU_VADDR);
+	restore_pl310(&pl310_data[0], (unsigned int *)COMCERTO_L310_VADDR);
+
+	printk(KERN_INFO "PM: C2000  is re-starting from Suspend State ...\n");
+
+	return;
+}
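For reference, the resume trampoline that C2k_pm_suspend() has just laid down at physical address 0x0 is summarized below. The note that the core restarts at address 0 on wake-up is an assumption drawn from the sleep.S comment further below ("the restore function is installed at location 0x0 of DDR").

	/*
	 *   0x00: 0xe3a00020   mov r0, #32      (JUMP_TO_RESUME_1)
	 *   0x04: 0xe590f000   ldr pc, [r0]     (JUMP_TO_RESUME_2)
	 *   0x20: physical address of comcerto_cpu_restore
	 *
	 * On wake-up the core starts at address 0, loads the pointer stored at
	 * 0x20 and branches into comcerto_cpu_restore(), which ends in cpu_resume.
	 */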
+
+/*  C2k_pm_enter
+ *  @state:         State we're entering.
+ */
+
+static int C2k_pm_enter(suspend_state_t state)
+{
+	switch(state)
+	{
+
+	case PM_SUSPEND_STANDBY:
+	case PM_SUSPEND_MEM:
+		C2k_pm_suspend();
+		break;
+	default:
+		return -EINVAL;
+	}
+	pr_info("PM: C2000 Leaving C2k_pm_enter \n");
+	return 0;
+}
+
+static int C2k_pm_valid_state(suspend_state_t state)
+{
+	switch (state) {
+		case PM_SUSPEND_ON:
+		case PM_SUSPEND_STANDBY:
+		case PM_SUSPEND_MEM:
+			return 1;
+		default:
+			return 0;
+	}
+}
+
+static suspend_state_t target_state;
+
+/*
+ * Called after processes are frozen, but before we shutdown devices.
+ */
+static int C2k_pm_begin(suspend_state_t state)
+{
+	target_state = state;
+	return 0;
+}
+
+/*
+ * Called right prior to thawing processes.
+ */
+static void C2k_pm_finish(void)
+{
+	printk(KERN_INFO "Suspend process is completed, waiting for the C2000 device to resume\n");
+}
+
+
+/*
+ * Called right prior to thawing processes.
+ */
+static void C2k_pm_end(void)
+{
+	printk(KERN_INFO "Resume process is completed, C2000 device is powered on again\n");
+	target_state = PM_SUSPEND_ON;
+}
+
+
+static const struct platform_suspend_ops C2k_pm_ops = {
+	.valid	   = C2k_pm_valid_state,
+	.begin     = C2k_pm_begin,
+	.enter     = C2k_pm_enter,
+	.finish    = C2k_pm_finish,
+	.end       = C2k_pm_end,
+};
+
+static int __init C2k_pm_init(void)
+{
+	printk(KERN_INFO "Power Management Mode Support for C2000\n");
+
+	suspend_set_ops(&C2k_pm_ops);
+	return 0;
+}
+arch_initcall(C2k_pm_init);
+
diff --git a/arch/arm/mach-comcerto/reset.c b/arch/arm/mach-comcerto/reset.c
index 5051344..d87333a 100644
--- a/arch/arm/mach-comcerto/reset.c
+++ b/arch/arm/mach-comcerto/reset.c
@@ -27,8 +27,11 @@
 #include <asm/io.h>
 #include <linux/spinlock.h>
 
+#include <linux/gpio.h>
+
 static char i2cspi_state[2],dus_state[3];
 spinlock_t reset_lock;
+static DEFINE_SPINLOCK(gpio_lock);
 
 
 void comcerto_rst_cntrl_set(unsigned int dev_rst_cntrl_bit)
@@ -378,6 +381,144 @@
 }
 EXPORT_SYMBOL(c2000_block_reset);
 
+#if defined(CONFIG_C2K_MFCN_EVM)
+void GPIO_reset_external_device(int block,int state)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&gpio_lock, flags);
+
+	/* Put the given block into reset or take it out of reset:
+	 * 0 ----> out of reset
+	 * 1 ----> reset
+	 */
+	switch (block){
+		case COMPONENT_ATHEROS_SWITCH:
+			if(gpio_request_one(GPIO_PIN_NUM_5, GPIOF_OUT_INIT_HIGH, GPIO_PIN_DESC_5)){
+				printk(KERN_ERR "%s:%d: Cannot request gpio for gpio-%d\n", \
+						__func__, __LINE__, GPIO_PIN_NUM_5);
+				break;	/* don't return with gpio_lock held */
+			}
+
+			if (state){
+				gpio_set_value(GPIO_PIN_NUM_5, GPIO_SET_0);
+
+				gpio_direction_input(GPIO_PIN_NUM_5);
+			}else{
+				gpio_set_value(GPIO_PIN_NUM_5, GPIO_SET_0);
+
+				gpio_direction_output(GPIO_PIN_NUM_5, GPIO_SET_0);
+
+				gpio_set_value(GPIO_PIN_NUM_5, GPIO_SET_1);
+			}
+
+			gpio_free(GPIO_PIN_NUM_5);
+
+			break;
+
+		case COMPONENT_SLIC:
+			if(gpio_request_one(GPIO_PIN_NUM_4, GPIOF_OUT_INIT_HIGH, GPIO_PIN_DESC_4)){
+				printk(KERN_ERR "%s:%d: Cannot request gpio for gpio-%d\n", \
+						__func__, __LINE__, GPIO_PIN_NUM_4);
+				break;	/* don't return with gpio_lock held */
+			}
+
+			if (state){
+				gpio_set_value(GPIO_PIN_NUM_4, GPIO_SET_0);
+
+				gpio_direction_input(GPIO_PIN_NUM_4);
+			}else{
+				gpio_set_value(GPIO_PIN_NUM_4, GPIO_SET_0);
+
+				gpio_direction_output(GPIO_PIN_NUM_4, GPIO_SET_0);
+
+				gpio_set_value(GPIO_PIN_NUM_4, GPIO_SET_1);
+			}
+
+			gpio_free(GPIO_PIN_NUM_4);
+
+			break;
+
+		case COMPONENT_PCIE0:
+			if (state){
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_48, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) | GPIO_PIN_48, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT) & ~GPIO_PIN_48, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}else{
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_48, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) & ~GPIO_PIN_48, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) | GPIO_PIN_48, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT) | GPIO_PIN_48, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}
+			break;
+		case COMPONENT_PCIE1:
+			if (state){
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_47, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) | GPIO_PIN_47, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT) & ~GPIO_PIN_47, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}else{
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_47, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) & ~GPIO_PIN_47, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) | GPIO_PIN_47, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT) | GPIO_PIN_47, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}
+			break;
+		case COMPONENT_USB_HUB:
+			if (state){
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_50, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) | GPIO_PIN_50, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT) & ~GPIO_PIN_50, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}else{
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_50, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) & ~GPIO_PIN_50, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) | GPIO_PIN_50, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT) | GPIO_PIN_50, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}
+			break;
+		case COMPONENT_EXP_DAUGTHER_CARD:
+			if (state){
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_49, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) | GPIO_PIN_49, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT) & ~GPIO_PIN_49, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}else{
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_49, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) & ~GPIO_PIN_49, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) | GPIO_PIN_49, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT) | GPIO_PIN_49, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}
+			break;
+		case COMPONENT_RGMII0:
+			if (state){
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_46, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) | GPIO_PIN_46, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT) & ~GPIO_PIN_46, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}else{
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_46, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) & ~GPIO_PIN_46, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) | GPIO_PIN_46, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT) | GPIO_PIN_46, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}
+			break;
+		case COMPONENT_RGMII1:
+			if (state){
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_45, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) | GPIO_PIN_45, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT)& ~GPIO_PIN_45, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}else{
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) & ~GPIO_PIN_45, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel( readl(COMCERTO_GPIO_63_32_PIN_OUTPUT_EN) & ~GPIO_PIN_45, COMCERTO_GPIO_63_32_PIN_OUTPUT_EN);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_OUTPUT) | GPIO_PIN_45, COMCERTO_GPIO_63_32_PIN_OUTPUT);
+				writel(readl(COMCERTO_GPIO_63_32_PIN_SELECT) | GPIO_PIN_45, COMCERTO_GPIO_63_32_PIN_SELECT);
+			}
+			break;
+		default:
+			break;
+	}
+
+	spin_unlock_irqrestore(&gpio_lock,flags);
+}
+EXPORT_SYMBOL(GPIO_reset_external_device);
+#endif
+
 void reset_init(void){
 
 	/*Initilize the DUS ,serde0/1/2 and I2CSPI dependancy values */
diff --git a/arch/arm/mach-comcerto/sleep.S b/arch/arm/mach-comcerto/sleep.S
new file mode 100644
index 0000000..5a5c275
--- /dev/null
+++ b/arch/arm/mach-comcerto/sleep.S
@@ -0,0 +1,284 @@
+/*
+ * arch/arm/mach-comcerto/sleep.S
+ *
+ * Author: Makarand Pawagi
+ *
+ * Copyright (C) 2013 Mindspeed Technologies, Inc.
+ * Copyright (c) 2003 ARM Limited
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/memory.h>
+
+#define SCRATCHPAD_SUSPEND_INDICATOR_LOC    (0xf0800000 + 0x2400)
+#define SCRATCHPAD_BASE_P       (0xf0800000+0x2400+0x100) /*IRAM_MEMORY_VADDR + Offset*/
+#define SCRATCHPAD_CPU_CONTEXT_LOC_OFFSET 0x70
+#define SCRATCHPAD_CPU_CONTEXT_LOC     (SCRATCHPAD_BASE_P + SCRATCHPAD_CPU_CONTEXT_LOC_OFFSET)
+
+
+	.text
+/* Function call to get the restore pointer for resume from OFF */
+ENTRY(c2k_get_restore_pointer)
+        stmfd   sp!, {lr}     @ save registers on stack
+	ARM ( adr	r0, comcerto_cpu_restore )
+        bic     r0, #PAGE_OFFSET
+        ldmfd   sp!, {pc}     @ restore regs and return
+ENTRY(get_restore_pointer_sz)
+        .word   . - get_restore_pointer_sz
+
+
+
+
+/*
+ * ======================
+ * == Idle entry point ==
+ * ======================
+ */
+
+/*
+ * Forces C2K into idle state
+ *
+ * comcerto_cpu_suspend() - This bit of code saves the CPU context if needed
+ * and executes the WFI instruction. Calling WFI effectively changes the
+ * power domains states to the desired target power states.
+ */
+	.align	3
+ENTRY(comcerto_cpu_suspend)
+	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
+
+	/*
+	 * r0 contains information about saving context:
+	 *   0 - No context lost
+	 *   1 - Only L1 and logic lost
+	 *   2 - Only L2 lost (Even L1 is retained we clean it along with L2)
+	 *   3 - Both L1 and L2 lost and logic lost
+	 */
+
+	/*
+	 * For OFF mode: save context and jump to WFI (comcerto_do_wfi)
+	 */
+	cmp	r0, #0x0		@ If no context save required,
+	beq	comcerto_do_wfi		@  jump to the WFI
+
+
+	/* Otherwise fall through to the save context code */
+comcerto_save_context_wfi:
+	/*
+	 * jump out to kernel flush routine
+	 *  - reuse that code is better
+	 *  - it executes in a cached space so is faster than refetch per-block
+	 *  - should be faster and will change with kernel
+	 *  - 'might' have to copy address, load and jump to it
+	 * Flush all data from the L1 data cache before disabling
+	 * SCTLR.C bit.
+	 */
+	ldr	r1, kernel_flush
+	mov	lr, pc
+	bx	r1
+
+	/*
+	 * Clear the SCTLR.C bit to prevent further data cache
+	 * allocation. Clearing SCTLR.C would make all the data accesses
+	 * strongly ordered and would not hit the cache.
+	 */
+	ARM ( mrc	p15, 0, r0, c1, c0, 0 )
+	ARM ( bic	r0, r0, #(1 << 2) )	@ Disable the C bit
+	ARM ( mcr	p15, 0, r0, c1, c0, 0 )
+	ARM ( isb )
+
+	/*
+	 * Invalidate L1 data cache. Even though only invalidate is
+	 * necessary exported flush API is used here. Doing clean
+	 * on already clean cache would be almost NOP.
+	 */
+	ldr	r1, kernel_flush
+	blx	r1
+	/*
+	 * The kernel doesn't interwork: v7_flush_dcache_all in particular will
+	 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
+	 * This sequence switches back to ARM.  Note that .align may insert a
+	 * nop: bx pc needs to be word-aligned in order to work.
+	 */
+ THUMB(	.thumb		)
+ THUMB(	.align		)
+ THUMB(	bx	pc	)
+ THUMB(	nop		)
+	.arm
+
+	b	comcerto_do_wfi
+
+/*
+ * Local variables
+ */
+kernel_flush:
+	.word v7_flush_dcache_all
+
+
+/* ===================================
+ * == WFI instruction => Enter idle ==
+ * ===================================
+ */
+
+/*
+ * Do WFI instruction
+ * Includes the resume path for non-OFF modes [TBD]
+ */
+	.align	3
+ENTRY(comcerto_do_wfi)
+
+	/* Store cpsr and spsr */
+        ldr 	r7, scratchpad_cpu_context_loc
+
+	/*mrs	r4, cpsr
+	mrs	r5, spsr
+	stmia	r7!, {r4-r5}*/
+
+
+
+	/* Saving all the banked registers */
+        mrs     r0, cpsr
+
+        /* Save the Undef mode registers */
+        bic     r1, r0, #0x1f
+        orr     r1, r1, #0x1b
+        msr     cpsr_c, r1
+	ARM ( stmia	r7!, {r13-r14} )
+	mrs	r13, spsr
+	stmia	r7!, {r13}
+
+        /* Save the Abort mode registers */
+        bic     r1, r0, #0x1f
+        orr     r1, r1, #0x17
+        msr     cpsr_c, r1
+	ARM ( stmia	r7!, {r13-r14} )
+	mrs	r13, spsr
+	stmia	r7!, {r13}
+
+        /* Save the IRQ mode registers */
+        bic     r1, r0, #0x1f
+        orr     r1, r1, #0x12
+        msr     cpsr_c, r1
+	ARM ( stmia	r7!, {r13-r14} )
+	mrs	r13, spsr
+	stmia	r7!, {r13}
+
+        /* Save the FIQ mode registers */
+        bic     r1, r0, #0x1f
+        orr     r1, r1, #0x11
+        msr     cpsr_c, r1
+	ARM ( stmia	r7!, {r8-r14} )
+	THUMB ( stmia	r7!, {r8-r12} )
+	mrs	r13, spsr
+	stmia	r7!, {r13}
+
+        /* Return to the original mode */
+        msr     cpsr_c, r0
+
+	/* We can Put DDR in self refresh mode here [TBD] */
+
+	/* Pass control to UtilPE */
+        ldr r4, scratchpad_reboot_indicator_loc
+        mov r5, #0xFF
+        str r5, [r4]
+
+	/* Data memory barrier and Data sync barrier */
+	dsb
+	dmb
+
+/*
+ * ===================================
+ * == WFI instruction => Enter idle ==
+ * ===================================
+ */
+	wfi				@ wait for interrupt
+
+/*
+ * =========================================
+ * ==  Resume path for non-OFF modes TBD  ==
+ * =========================================
+ */
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+/*
+ * ===================================
+ * TBD
+ * ===================================
+ */
+
+/*
+ * ==============================
+ * == Resume path for OFF mode ==
+ * ==============================
+ */
+
+/*
+ * The restore function is instaleed at 0x0 location
+ * of DDR.
+ */
+
+ENTRY(comcerto_cpu_restore)
+	ldr	r1, l2dis_3630
+	cmp	r1, #0x1		/*@ Test if L2 re-enable needed on 3630*/
+	bne	skipl2reen
+	mrc	p15, 0, r1, c1, c0, 1
+	orr	r1, r1, #2		/*@ re-enable L2 cache*/
+	mcr	p15, 0, r1, c1, c0, 1
+skipl2reen:
+
+
+	/* Now branch to the common CPU resume function */
+	blx	cpu_resume
+
+	.ltorg
+
+/*
+ * Local variables
+ */
+
+l2dis_3630:
+	.word	0
+
+
+ENTRY(copy_words)
+	cmp	r2, #0
+	beq	f1
+b0:
+	ldr	r3, [r1], #4
+	str	r3, [r0], #4
+	subs	r2, r2, #1
+	bne	b0
+f1:
+	bx	lr
+
+
+
+
+scratchpad_cpu_context_loc:
+        .word   SCRATCHPAD_CPU_CONTEXT_LOC
+scratchpad_reboot_indicator_loc:
+        .word   SCRATCHPAD_SUSPEND_INDICATOR_LOC
diff --git a/arch/arm/mach-comcerto/sysfstdm.c b/arch/arm/mach-comcerto/sysfstdm.c
index 1e12d59..0f50020 100644
--- a/arch/arm/mach-comcerto/sysfstdm.c
+++ b/arch/arm/mach-comcerto/sysfstdm.c
@@ -21,6 +21,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 #include <asm/io.h>
+#include <asm/delay.h>
 #include <mach/comcerto-common.h>
 #include <asm/div64.h>
 #include <mach/comcerto-2000/clk-rst.h>
@@ -246,6 +247,13 @@
 		writel(TDM_CTRL_SLIC_RESET | COMCERTO_BLOCK_MSIF_DIV, TDM_CLK_CNTRL); /* TDM = NTG out / 12 */
 		writel((0x3 << 4) |(readl(COMCERTO_GPIO_MISC_PIN_SELECT) & ~(0x3 << 4)), COMCERTO_GPIO_MISC_PIN_SELECT);
 		writel(COMCERTO_BLOCK_MSIF_DIV, TDM_CLK_CNTRL); /* Remove out of reset */
+
+		/*
+		 * Delay 100us after the C2K TDM block has been un-reset, before
+		 * the SLIC is un-reset. This ensures the MSIF interface clock is
+		 * active well before the SLIC comes out of reset (SiLabs spec).
+		 */
+		udelay(100);
+
+		writel(0x1 << 30, COMCERTO_GPIO_63_32_PIN_OUTPUT); /* remove slic out of reset */
 		break;
 
 	default:
diff --git a/crypto/ocf/Kconfig b/crypto/ocf/Kconfig
index 805cb4c..8b01ca5 100644
--- a/crypto/ocf/Kconfig
+++ b/crypto/ocf/Kconfig
@@ -39,4 +39,10 @@
 	  of OCF.  Also includes code to benchmark the IXP Access library
 	  for comparison.
 
+config OCF_DM_CRYPT
+	bool "ocf-dm-crypt (HW crypto engine)"
+	depends on OCF_OCF
+	help
+	  OCF support for crypto offloading of dm-crypt.
+
 endmenu
diff --git a/crypto/ocf/crypto.c b/crypto/ocf/crypto.c
index f48210d..7722361 100644
--- a/crypto/ocf/crypto.c
+++ b/crypto/ocf/crypto.c
@@ -812,11 +812,11 @@
 	cryptostats.cs_ops++;
 
 	CRYPTO_Q_LOCK();
-	if (crypto_q_cnt >= crypto_q_max) {
+	/*if (crypto_q_cnt >= crypto_q_max) {
 		cryptostats.cs_drops++;
 		CRYPTO_Q_UNLOCK();
 		return ENOMEM;
-	}
+	}*/
 	crypto_q_cnt++;
 
 	/* make sure we are starting a fresh run on this crp. */
diff --git a/crypto/ocf/cryptodev.h b/crypto/ocf/cryptodev.h
index 6b9e727..6067541 100644
--- a/crypto/ocf/cryptodev.h
+++ b/crypto/ocf/cryptodev.h
@@ -423,6 +423,7 @@
 #define CRYPTO_F_CBIFSYNC	0x0040	/* Do CBIMM if op is synchronous */
 
 	caddr_t		crp_buf;	/* Data to be processed */
+	caddr_t		crp_out_buf;	/* Crypto Result Buffer */
 	caddr_t		crp_opaque;	/* Opaque pointer, passed along */
 	struct cryptodesc *crp_desc;	/* Linked list of processing descriptors */
 
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index b175000..c697159 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -357,4 +357,35 @@
 	return (cap & 0x1f) + 1;
 }
 
+#if defined (CONFIG_COMCERTO_AHCI_PROF)
+
+#define MAX_AHCI_PORTS		4
+#define MAX_AHCI_SLOTS		32
+#define MAX_BINS		64
+#define US_SHIFT		8
+#define BYTE_SHIFT		14
+#define RATE_SHIFT		2
+
+struct ahci_port_stats {
+	struct timeval first_issue;
+	unsigned int pending_flag;
+	unsigned int nb_pending;
+	unsigned int nb_pending_max;
+	unsigned int nb_pending_total;
+	unsigned int bytes_pending;
+	unsigned int diff_us;
+	unsigned int pending_counter[MAX_BINS];
+	unsigned int rate_counter[MAX_BINS];
+
+	unsigned int init_prof;
+	unsigned int time_counter[MAX_BINS]; // 128us -> 16ms
+	unsigned int data_counter[MAX_BINS]; // 4K-> 1020K
+	unsigned int no_free_slot;
+	struct timeval last_req;
+};
+
+extern struct ahci_port_stats ahci_port_stats[MAX_AHCI_PORTS];
+extern unsigned int enable_ahci_prof;
+#endif
+
 #endif /* _AHCI_H */
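The profiling state declared above is filled in by the libahci.c and libata-core.c hunks later in this patch. As a rough sketch of the intended bookkeeping (the helper names ahci_prof_bin and prof_* are hypothetical; only the struct fields and constants come from the header above): measurements are shifted into one of MAX_BINS fixed-width histogram buckets, and outstanding commands are tracked per tag in pending_flag.

/* Map a measurement into one of the MAX_BINS histogram buckets, using the
 * same shift-and-clamp pattern as the profiling code in this patch. */
static inline unsigned int ahci_prof_bin(unsigned int value, unsigned int shift)
{
	unsigned int bin = value >> shift;	/* e.g. US_SHIFT for latencies */

	return (bin >= MAX_BINS) ? MAX_BINS - 1 : bin;
}

/* Record a newly issued command on this port. */
static inline void prof_issue(struct ahci_port_stats *stats,
			      unsigned int tag, unsigned int nbytes)
{
	stats->pending_flag |= 1U << tag;	/* command 'tag' is now outstanding */
	stats->nb_pending++;
	stats->bytes_pending += nbytes;
}

/* Record completion of a previously issued command. */
static inline void prof_complete(struct ahci_port_stats *stats, unsigned int tag)
{
	if (stats->pending_flag & (1U << tag)) {
		stats->pending_flag &= ~(1U << tag);
		stats->nb_pending--;
	}
}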
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 1fab515..d1d9d58 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -23,7 +23,9 @@
 #include <linux/ahci_platform.h>
 #include <linux/clk.h>
 #include <mach/reset.h>
+#include <mach/comcerto-2000/pm.h>
 #include "ahci.h"
+#include <mach/serdes-c2000.h>
 
 #ifdef CONFIG_ARCH_M86XXX 
 /* SATA Clocks */
@@ -79,6 +81,20 @@
 {
         struct ata_host *host = platform_get_drvdata(pdev);
 	int ret=0;
+
+#ifdef CONFIG_ARCH_M86XXX
+	 /* Check the SATA bit in the host/UtilPE shared PMU bitmask: if SATA
+	  * is kept as a wakeup source, do not suspend the SATA device, since
+	  * it is needed to wake the system from suspend.
+	  */
+	if ( !(host_utilpe_shared_pmu_bitmask & SATA_IRQ )){
+
+		/* Just return without suspending.
+		 */
+		return ret;
+	}
+#endif
+
         if (host)
 		ret = ata_host_suspend(host, state);
 
@@ -89,6 +105,14 @@
 		clk_disable(sata_clk);
 		clk_disable(sata_oob_clk);
 		clk_disable(sata_pmu_clk);
+
+		/* PM Performance Enhancement : SRDS1 PD SATA1/SRDS2 PD SATA2 - P2 state, */
+		/* Resets the entire PHY module and CMU power down */
+		if (readl(COMCERTO_GPIO_SYSTEM_CONFIG) & BOOT_SERDES1_CNF_SATA0)
+			writel((readl((COMCERTO_DWC1_CFG_BASE+0x44)) | 0xCC), (COMCERTO_DWC1_CFG_BASE+0x44));
+		else if (readl(COMCERTO_GPIO_SYSTEM_CONFIG) & BOOT_SERDES2_CNF_SATA1)
+			writel((readl((COMCERTO_DWC1_CFG_BASE+0x54)) | 0xCC), (COMCERTO_DWC1_CFG_BASE+0x54));
+
 	}
 #endif
 	
@@ -100,6 +124,25 @@
         struct ata_host *host = platform_get_drvdata(pdev);
 
 #ifdef CONFIG_ARCH_M86XXX
+	/* PM Performance Enhancement : SRDS1 PD SATA1/SRDS2 PD SATA2 - P2 state, */
+	/* Enable PHY module and CMU power UP */
+	if (readl(COMCERTO_GPIO_SYSTEM_CONFIG) & BOOT_SERDES1_CNF_SATA0)
+ 		writel((readl((COMCERTO_DWC1_CFG_BASE+0x44)) & ~0xCC), (COMCERTO_DWC1_CFG_BASE+0x44));
+	else if (readl(COMCERTO_GPIO_SYSTEM_CONFIG) & BOOT_SERDES2_CNF_SATA1)
+		writel((readl((COMCERTO_DWC1_CFG_BASE+0x54)) & ~0xCC), (COMCERTO_DWC1_CFG_BASE+0x54));
+
+	/* Check the SATA bit in the host/UtilPE shared PMU bitmask: if SATA
+	 * was kept as a wakeup source, the device was not suspended, so skip
+	 * the resume sequence here as well.
+	 */
+
+	if ( !(host_utilpe_shared_pmu_bitmask & SATA_IRQ )){
+
+		/* Just return.
+		 */
+		return 0;
+	}
+
 	/* Do the  clock enable here  PMU,OOB,AXI */
 	clk_enable(sata_clk);
 	clk_enable(sata_oob_clk);
@@ -281,8 +324,8 @@
 
 #ifdef CONFIG_ARCH_M86XXX
 		/* Optimized PFE/SATA DDR interaction,
-		limit burst size of SATA controller */
-		writel(0 , ahci_port_base(ap) + 0x70);
+		limit read burst size of SATA controller */
+		writel(0x41, ahci_port_base(ap) + 0x70);
 #endif
 
 		/* disabled/not-implemented port */
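Both PM callbacks above use the same early-exit guard on the host/UtilPE shared PMU bitmask before touching clocks or the SerDes registers. Schematically (a sketch, not part of the patch; the helper name is hypothetical):

/* The suspend and resume paths above both return early when the SATA bit
 * is clear in the shared bitmask; per their comments, SATA is then left
 * alive so it can wake the system. */
static inline bool sata_pm_skip(u32 host_utilpe_shared_pmu_bitmask)
{
	return !(host_utilpe_shared_pmu_bitmask & SATA_IRQ);
}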
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 58542d1..a0e3c7c 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1726,20 +1726,6 @@
 
 /*
 */
-
-#if defined (CONFIG_COMCERTO_AHCI_PROF)
-unsigned int ahci_time_counter[256]; // 4 ms -> 1S
-unsigned int ahci_data_counter[256]; // 4K-> 1020K
-unsigned int ahci_int_before_req;
-static struct timeval last_ahci_req;
-unsigned int init_ahci_prof = 0;
-unsigned int enable_ahci_prof = 0;
-extern struct timeval ahci_last_qc_comp[32];
-extern unsigned int ahci_last_qc_comp_flag[32];
-#endif
-
-static struct timeval time;
-
 irqreturn_t ahci_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
@@ -1807,33 +1793,6 @@
 	void __iomem *port_mmio = ahci_port_base(ap);
 	struct ahci_port_priv *pp = ap->private_data;
 
-#if defined(CONFIG_COMCERTO_AHCI_PROF)
-	struct timeval now;
-
-	if (enable_ahci_prof) {
-		do_gettimeofday(&now);
-
-		if (init_ahci_prof) {
-			int diff_time_ms;
-			diff_time_ms = ((now.tv_sec - last_ahci_req.tv_sec) * 1000) + ((now.tv_usec - last_ahci_req.tv_usec) / 1000);
-			if (diff_time_ms < 1000) {//Don't record more than 1s
-				ahci_time_counter[diff_time_ms >> 3]++;
-			}
-			else
-				ahci_time_counter[255]++;
-		}
-		else {
-			init_ahci_prof = 1;
-		}
-		last_ahci_req = now;
-
-		if (qc->nbytes < (1 << 21))
-			ahci_data_counter[(qc->nbytes >> 13) & 0xFF]++;
-		else
-			ahci_data_counter[255]++;
-	}
-#endif
-
 	/* Keep track of the currently active link.  It will be used
 	 * in completion path to determine whether NCQ phase is in
 	 * progress.
@@ -1850,10 +1809,55 @@
 		writel(fbs, port_mmio + PORT_FBS);
 		pp->fbs_last_dev = qc->dev->link->pmp;
 	}
+
 #if defined(CONFIG_COMCERTO_AHCI_PROF)
 	if (enable_ahci_prof) {
-		ahci_last_qc_comp[qc->tag] = now;
-		ahci_last_qc_comp_flag[qc->tag] = 1;
+		struct ahci_port_stats *stats = &ahci_port_stats[ap->port_no];
+		struct timeval now;
+		int bin;
+
+		do_gettimeofday(&now);
+
+		if (stats->init_prof) {
+			int diff_time_us;
+
+			diff_time_us = (now.tv_sec - stats->last_req.tv_sec) * 1000 * 1000 + (now.tv_usec - stats->last_req.tv_usec);
+
+			bin = diff_time_us >> US_SHIFT;
+			if (bin >= MAX_BINS)
+				bin = MAX_BINS - 1;
+
+			stats->time_counter[bin]++;
+		}
+		else {
+			stats->init_prof = 1;
+		}
+
+		stats->last_req = now;
+
+		bin = qc->nbytes >> BYTE_SHIFT;
+		if (bin >= MAX_BINS)
+			bin = MAX_BINS - 1;
+
+		stats->data_counter[bin]++;
+
+		if (!stats->nb_pending) {
+			stats->first_issue = now;
+			stats->nb_pending_total = 0;
+		}
+
+		stats->nb_pending_total++;
+
+		/* This should never overflow */
+		stats->pending_counter[stats->nb_pending & (MAX_AHCI_SLOTS - 1)]++;
+
+		stats->nb_pending++;
+
+		if (stats->nb_pending_total > stats->nb_pending_max)
+			stats->nb_pending_max = stats->nb_pending_total;
+
+		stats->bytes_pending += qc->nbytes;
+		stats->pending_flag |= 1 << qc->tag;
 	}
 #endif
 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 62b5eed..0d285e2 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4664,11 +4664,12 @@
 }
 
 #if defined(CONFIG_COMCERTO_AHCI_PROF)
-unsigned int ahci_qc_comp_counter[33];
-struct timeval ahci_last_qc_comp[32];
-unsigned int ahci_last_qc_comp_flag[32];
-unsigned int ahci_qc_no_free_slot = 0;
-extern unsigned int enable_ahci_prof;
+
+#include "ahci.h"
+
+struct ahci_port_stats ahci_port_stats[MAX_AHCI_PORTS];
+unsigned int enable_ahci_prof = 0;
+
 #endif
 
 /**
@@ -4701,7 +4702,7 @@
 #if defined(CONFIG_COMCERTO_AHCI_PROF)
 	if (enable_ahci_prof)
 		if (qc == NULL) {
-			ahci_qc_no_free_slot++;
+			ahci_port_stats[ap->port_no].no_free_slot++;
 		}
 #endif
 
@@ -4748,11 +4749,6 @@
 	struct ata_port *ap;
 	unsigned int tag;
 
-#if defined(CONFIG_COMCERTO_AHCI_PROF)
-	struct timeval now;
-	int diff_time_ms;
-#endif
-
 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
 	ap = qc->ap;
 
@@ -4763,23 +4759,45 @@
 		clear_bit(tag, &ap->qc_allocated);
 
 #if defined(CONFIG_COMCERTO_AHCI_PROF)
-	if (enable_ahci_prof) {
-		if (ahci_last_qc_comp_flag[tag]) {
-			int inx = 32;
+		if (enable_ahci_prof) {
+			struct ahci_port_stats *stats = &ahci_port_stats[ap->port_no];
 
-			do_gettimeofday(&now);
+			if (stats->pending_flag & (1 << tag)) {
+				stats->pending_flag &= ~(1 << tag);
+				stats->nb_pending--;
 
-			diff_time_ms = ((now.tv_sec - ahci_last_qc_comp[tag].tv_sec) * 1000) + 
-                                ((now.tv_usec - ahci_last_qc_comp[tag].tv_usec) / 1000);
+				if (!stats->nb_pending) {
+					struct timeval now;
+					int diff_time_us;
+					unsigned int rate;
+					int bin;
 
-			if (diff_time_ms < 512) 
-				inx = diff_time_ms >> 4;
+					do_gettimeofday(&now);
 
-			ahci_qc_comp_counter[inx]++;
+					diff_time_us = ((now.tv_sec - stats->first_issue.tv_sec) * 1000 * 1000) +
+								(now.tv_usec - stats->first_issue.tv_usec);
 
-			ahci_last_qc_comp_flag[tag] = 0;
+					stats->diff_us += diff_time_us;
+
+					/* Do the average for at least 10MiB of data transferred */
+					if (stats->bytes_pending > (10 * (1 << 20))) {
+
+						rate = ((stats->bytes_pending / stats->diff_us) * 1000 * 1000) >> 20; //MiBps
+
+						bin = rate >> RATE_SHIFT;
+						if (bin >= MAX_BINS)
+							bin = MAX_BINS - 1;
+
+						/* Track how many KiB were transferred at this rate */
+						stats->rate_counter[bin] += stats->bytes_pending >> 10;
+
+						/* Reset stats */
+						stats->bytes_pending = 0;
+						stats->diff_us = 0;
+					}
+				}
+			}
 		}
-	}
 #endif
 	}
 }
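The completion path above turns the accumulated byte count and elapsed microseconds into a MiB/s figure before binning it. A worked sketch of that arithmetic (not part of the patch; the helper name is hypothetical), assuming 10 MiB completed in 100000 us:

/* Rate binning as used above, integer arithmetic throughout:
 *   bytes = 10485760, diff_us = 100000
 *   rate  = ((10485760 / 100000) * 1000 * 1000) >> 20 = 99 MiB/s
 *   bin   = 99 >> RATE_SHIFT (= 2)                    = 24
 * so roughly 100 MiB/s traffic accumulates its KiB counts in rate_counter[24]. */
static unsigned int ahci_rate_bin(unsigned int bytes, unsigned int diff_us)
{
	unsigned int rate = ((bytes / diff_us) * 1000 * 1000) >> 20;	/* MiB/s */
	unsigned int bin = rate >> RATE_SHIFT;

	return (bin >= MAX_BINS) ? MAX_BINS - 1 : bin;
}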
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 0e1b1d6..c5005a9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -288,6 +288,15 @@
 
 	  If unsure, say N.
 
+config RAID_ZERO_COPY
+	bool "Optimized DMA/XOR offload: reduce raid5 memcpy operations offloaded to DMA"
+	depends on ASYNC_TX_DMA
+	help
+	  This allows the async_tx API to try to reduce raid5 memcpy operations
+	  offloaded to DMA. If you have a DMA device that supports memcpy
+	  offloading, say Y; otherwise say N.
+
+
 config DMATEST
 	tristate "DMA Test client"
 	depends on DMA_ENGINE
diff --git a/drivers/dma/c2k_dma.c b/drivers/dma/c2k_dma.c
index dde086c..c5b7d91 100644
--- a/drivers/dma/c2k_dma.c
+++ b/drivers/dma/c2k_dma.c
@@ -11,9 +11,77 @@
 #include <linux/dma-mapping.h>
 #include <asm/io.h>
 #include <mach/c2k_dma.h>
+#include <linux/dmaengine.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
 
 
-static int mdma_busy = 0;
+#define XOR_MAX_SRC_CNT	6
+
+#define virt_to_xor_dma(pbase, vbase, vaddr)		((pbase + ((unsigned long)vaddr - (unsigned long)vbase)))
+
+struct comcerto_xor_device {
+	struct dma_device        device;
+};
+
+struct comcerto_xor_chan {
+	struct comcerto_xor_device        *device;
+	struct dma_chan                 chan;
+	struct tasklet_struct       irq_tasklet;
+	spinlock_t                  lock;
+	dma_cookie_t                completed_cookie;
+};
+
+struct comcerto_sw_xor_desc {
+	struct dma_async_tx_descriptor async_tx;
+	int src_cnt;
+	size_t len;
+	dma_addr_t dma_dest;
+	dma_addr_t dma_src[XOR_MAX_SRC_CNT];
+};
+
+void *comcerto_xor_pool_virt;
+dma_addr_t comcerto_xor_pool_phy;
+
+spinlock_t	comcerto_xor_cleanup_lock;
+static DECLARE_WAIT_QUEUE_HEAD(comcerto_xor_wait_queue);
+static int comcerto_xor_sleeping = 0;
+
+static int xor_rd_idx = 0;
+static int xor_wr_idx = 0;
+static int xor_dma_idx = 0;
+static int xor_sw_rd_idx = 0;
+static int xor_sw_wr_idx = 0;
+static int xor_sw_wr_prev_idx = 0;
+static int xor_current_batch_count = 0;
+
+#define XOR_FDESC_COUNT 512
+#if defined(CONFIG_COMCERTO_64K_PAGES)
+#define XOR_SW_FDESC_COUNT 32
+#else
+#define XOR_SW_FDESC_COUNT XOR_FDESC_COUNT
+#endif
+
+struct timer_list comcerto_xor_timer;
+#define COMPLETION_TIMEOUT      msecs_to_jiffies(100)
+static int comerto_xor_timer_first = 1;
+
+struct comcerto_xor_inbound_fdesc *xor_in_fdesc[XOR_FDESC_COUNT];
+struct comcerto_xor_outbound_fdesc *xor_out_fdesc[XOR_FDESC_COUNT];
+struct comcerto_sw_xor_desc     sw_xor_desc[XOR_SW_FDESC_COUNT];
+
+struct comcerto_xor_device *comcerto_xor_dev;
+struct comcerto_xor_chan   comcerto_xor_ch;
+#define OWNER_XOR_FREE	1
+#define OWNER_MEMCPY_FREE 2
+#define OWNER_XOR_BUSY	3
+#define OWNER_MEMCPY_BUSY	4
+static int dma_owned = 0;
+static int memcpy_processed_ongoing = 0;
+static int memcpy_pending_count = 0;
+
+
+//static int mdma_busy = 0;
 static int mdma_done;
 static spinlock_t mdma_lock;
 
@@ -37,18 +105,376 @@
 
 #define FDONE_MASK	0x80000000
 
-static DECLARE_WAIT_QUEUE_HEAD(mdma_busy_queue);
+#define FLENEN          0x2
+
+static DECLARE_WAIT_QUEUE_HEAD(mdma_memcpy_busy_queue);
 static DECLARE_WAIT_QUEUE_HEAD(mdma_done_queue);
 
 unsigned long mdma_in_desc_phy;
 unsigned long mdma_out_desc_phy;
 
-struct comcerto_xor_inbound_fdesc *mdma_in_desc;
-struct comcerto_xor_outbound_fdesc *mdma_out_desc;
+struct comcerto_memcpy_inbound_fdesc *mdma_in_desc;
+struct comcerto_memcpy_outbound_fdesc *mdma_out_desc;
 
 EXPORT_SYMBOL(mdma_in_desc);
 EXPORT_SYMBOL(mdma_out_desc);
 
+static inline void comcerto_xor_set_in_bdesc(u32 buf_idx, u32 bdesc_idx, u32 addr, u32 ctrl)
+{
+	xor_in_fdesc[buf_idx]->bdesc[bdesc_idx].bpointer = addr;
+	xor_in_fdesc[buf_idx]->bdesc[bdesc_idx].bcontrol = ctrl;
+}
+
+static inline void comcerto_xor_set_out_bdesc(u32 buf_idx, u32 bdesc_idx, u32 addr, u32 ctrl)
+{
+	xor_out_fdesc[buf_idx]->bdesc[bdesc_idx].bpointer = addr;
+	xor_out_fdesc[buf_idx]->bdesc[bdesc_idx].bcontrol = ctrl;
+}
+
+static void comcerto_xor_set_desc(int sw_idx, int hw_idx)
+{
+	int i,split_no;
+	u32 fstatus0 = 0;
+	u32 addr;
+	int split_size;
+	dma_addr_t dest;
+	dma_addr_t *srcs;	
+	u32 block_size;
+	int src_cnt;
+
+	block_size = sw_xor_desc[sw_idx].len;
+	src_cnt = sw_xor_desc[sw_idx].src_cnt;
+	srcs = sw_xor_desc[sw_idx].dma_src;
+	dest = sw_xor_desc[sw_idx].dma_dest;
+
+	if(block_size != PAGE_SIZE)
+		printk("%s: input buffers not %d len\n",__func__, (unsigned int)PAGE_SIZE);	
+
+#if defined(CONFIG_COMCERTO_64K_PAGES)
+	block_size = block_size/16; //to get 4K
+	split_size = 16;
+#else
+	split_size = 1;
+#endif
+
+	for(split_no = 0 ; split_no < split_size; split_no++)
+	{
+
+		for(i = 0; i < src_cnt - 1; i++) {
+			addr = (u32)sw_xor_desc[sw_idx].dma_src[i] + 4096 * split_no;
+			comcerto_xor_set_in_bdesc(hw_idx, i, addr, block_size);
+		}
+
+		addr = (u32)sw_xor_desc[sw_idx].dma_src[src_cnt - 1] + 4096 * split_no;
+		comcerto_xor_set_in_bdesc(hw_idx, src_cnt - 1, addr, block_size | BLAST);
+
+		fstatus0 = 1; // New Req, reset block counter, block offset, clear scratchpad (overwrite existing data)
+		fstatus0 |=  (1 << 1); // Read SP, return content of scratch pad after processing input data
+		fstatus0 |=  (0 << 2); // Mode, Encode
+		fstatus0 |=  (src_cnt << 4); // Number of blocks to be processed
+		fstatus0 |=  (1 << 9); // Type, XOR
+		fstatus0 |=  (XOR_BLOCK_SIZE_4096 << 11);
+
+		xor_in_fdesc[hw_idx]->fcontrol = 0;
+		xor_in_fdesc[hw_idx]->fstatus0 = fstatus0;
+		xor_in_fdesc[hw_idx]->fstatus1 = 0;
+
+		addr = (u32)sw_xor_desc[sw_idx].dma_dest + 4096 * split_no;
+		comcerto_xor_set_out_bdesc(hw_idx, 0, addr, block_size | BLAST);
+
+		xor_out_fdesc[hw_idx]->fcontrol = 0;
+		xor_out_fdesc[hw_idx]->fstatus0 = 0;
+		xor_out_fdesc[hw_idx]->fstatus1 = 0;
+
+		hw_idx = (hw_idx + 1) % XOR_FDESC_COUNT;
+	}
+
+	xor_wr_idx = hw_idx;
+}
+
+static inline int comcerto_dma_busy(void)
+{
+	return (readl_relaxed(IO2M_CONTROL) & 0x1);
+}
+
+static void comcerto_xor_update_dma_head(int idx)
+{
+	u32 out_desc_head, in_desc_head;
+	out_desc_head = virt_to_xor_dma(comcerto_xor_pool_phy, comcerto_xor_pool_virt, xor_out_fdesc[idx]);
+	in_desc_head = virt_to_xor_dma(comcerto_xor_pool_phy, comcerto_xor_pool_virt, xor_in_fdesc[idx]);
+
+	wmb();
+	writel_relaxed(out_desc_head, IO2M_HEAD);
+	writel_relaxed(in_desc_head, M2IO_HEAD);
+}
+
+static void comcerto_xor_update_dma_flen(int flen)
+{
+	wmb();
+	writel_relaxed(flen, M2IO_FLEN);
+	writel_relaxed(flen, IO2M_FLEN);
+}
+
+static int comcerto_xor_rb_full(void)
+{
+	if(CIRC_SPACE(xor_sw_wr_idx, xor_sw_rd_idx, XOR_SW_FDESC_COUNT) > 0)
+		return 0;
+	else
+		return 1;
+}
+
+void comcerto_xor_request_wait(void)
+{
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(&comcerto_xor_wait_queue, &wait, TASK_UNINTERRUPTIBLE);
+
+	comcerto_xor_sleeping++;
+
+	while (comcerto_xor_rb_full()) {
+		spin_unlock_bh(&comcerto_xor_ch.lock);
+		schedule();
+		spin_lock_bh(&comcerto_xor_ch.lock);
+		prepare_to_wait(&comcerto_xor_wait_queue, &wait, TASK_UNINTERRUPTIBLE);
+	}
+
+	finish_wait(&comcerto_xor_wait_queue, &wait);
+	comcerto_xor_sleeping--;
+}
+
+static void comcerto_dma_process(void)
+{
+	unsigned long pending_count;
+
+	if(!dma_owned || dma_owned==OWNER_XOR_FREE)
+	{
+		pending_count = CIRC_CNT(xor_wr_idx, xor_dma_idx, XOR_FDESC_COUNT);
+		if(pending_count)
+		{
+			dma_owned = OWNER_XOR_BUSY;
+			xor_current_batch_count = pending_count;
+			comcerto_xor_update_dma_flen(pending_count);
+			comcerto_xor_update_dma_head(xor_dma_idx);
+		}
+	}
+}
+
+static void comcerto_xor_cleanup(void)
+{
+	int i,j,k;
+	int idx;
+	int cleanup_count;
+	unsigned long flags;
+	int split_size;
+	struct comcerto_sw_xor_desc *sw_desc;
+
+	spin_lock_irqsave(&mdma_lock, flags);
+	cleanup_count = CIRC_CNT(xor_dma_idx, xor_rd_idx, XOR_FDESC_COUNT);
+	spin_unlock_irqrestore(&mdma_lock, flags);
+
+#if defined(CONFIG_COMCERTO_64K_PAGES)
+	split_size = 16;
+#else
+	split_size = 1;
+#endif
+
+	if(cleanup_count && (cleanup_count % split_size == 0))
+	{
+		for(i = 0 ; i < cleanup_count/split_size; i++)
+		{
+			struct dma_async_tx_descriptor *tx;
+
+#if defined(CONFIG_COMCERTO_64K_PAGES)
+			if(xor_rd_idx%16)
+				printk("%s: xor_rd_idx %d not multiple of 16\n",__func__, xor_rd_idx);
+#endif
+			idx = xor_sw_rd_idx;
+			tx = &sw_xor_desc[idx].async_tx;
+			sw_desc = &sw_xor_desc[idx];
+
+			for (j = 0; j < sw_desc->src_cnt; j++) {
+				dma_unmap_page(NULL, sw_desc->dma_src[j], sw_desc->len, DMA_TO_DEVICE);
+			}
+			dma_unmap_page(NULL, sw_desc->dma_dest, sw_desc->len, DMA_BIDIRECTIONAL);
+
+			comcerto_xor_ch.completed_cookie = tx->cookie;
+
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
+			}
+			else
+				printk("No Callback\n");
+
+			smp_mb();
+
+			spin_lock_irqsave(&mdma_lock, flags);
+			for(k = 0 ; k < split_size; k++)
+			{
+				xor_rd_idx = (xor_rd_idx + 1) % XOR_FDESC_COUNT;
+			}
+			spin_unlock_irqrestore(&mdma_lock, flags);
+
+			xor_sw_rd_idx = (xor_sw_rd_idx + 1) % XOR_SW_FDESC_COUNT;
+		}
+	}
+	else
+	{
+		if(cleanup_count)
+			printk("%s: cleanup_count %d not multiple of 16\n",__func__, cleanup_count);
+	}
+}
+
+static void comcerto_xor_tasklet(unsigned long data)
+{
+	spin_lock_bh(&comcerto_xor_ch.lock);
+
+	comcerto_xor_cleanup();
+
+	spin_unlock_bh(&comcerto_xor_ch.lock);
+}
+
+static void comcerto_xor_timer_fnc(unsigned long data)
+{
+	unsigned long           flags;
+
+	spin_lock_irqsave(&mdma_lock, flags);
+
+	if(comcerto_xor_sleeping)
+		wake_up(&comcerto_xor_wait_queue);
+
+	spin_unlock_irqrestore(&mdma_lock, flags);
+
+	comcerto_xor_tasklet(0);
+
+	mod_timer(&comcerto_xor_timer, jiffies + COMPLETION_TIMEOUT);
+
+}
+
+
+static void comcerto_xor_issue_pending(struct dma_chan *chan)
+{
+	unsigned long           flags;
+
+
+	spin_lock_irqsave(&mdma_lock, flags);
+
+	comcerto_dma_process();
+
+	if(comcerto_xor_sleeping)
+		wake_up(&comcerto_xor_wait_queue);
+
+	spin_unlock_irqrestore(&mdma_lock, flags);
+
+	comcerto_xor_tasklet(0);
+}
+
+static inline struct comcerto_sw_xor_desc *txd_to_comcerto_desc(struct dma_async_tx_descriptor *txd)
+{
+	return container_of(txd, struct comcerto_sw_xor_desc, async_tx);
+}
+
+
+static dma_cookie_t comcerto_xor_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	unsigned long flags;
+
+	struct dma_chan *c = tx->chan;
+	dma_cookie_t cookie;
+
+	spin_lock_bh(&comcerto_xor_ch.lock);
+
+	cookie = c->cookie;
+	cookie++;
+	if (cookie < 0)
+		cookie = 1;
+	tx->cookie = cookie;
+	c->cookie = cookie;
+
+	spin_lock_irqsave(&mdma_lock, flags);
+
+	comcerto_xor_set_desc(xor_sw_wr_prev_idx, xor_wr_idx);
+	xor_sw_wr_prev_idx = (xor_sw_wr_prev_idx + 1) % XOR_SW_FDESC_COUNT ;
+	comcerto_dma_process();
+
+	spin_unlock_irqrestore(&mdma_lock, flags);
+
+	spin_unlock_bh(&comcerto_xor_ch.lock);
+
+	if(comerto_xor_timer_first)
+	{
+		mod_timer(&comcerto_xor_timer, jiffies + COMPLETION_TIMEOUT);
+		comerto_xor_timer_first = 0;
+	}
+	return cookie;
+}
+
+struct dma_async_tx_descriptor *
+comcerto_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *srcs,
+		unsigned int src_cnt, size_t len, unsigned long xor_flags)
+{
+	int desc_idx;
+	int i;
+
+	spin_lock_bh(&comcerto_xor_ch.lock);
+	if(comcerto_xor_rb_full())
+	{
+		comcerto_xor_request_wait();
+	}
+
+	desc_idx = xor_sw_wr_idx;
+	xor_sw_wr_idx = (xor_sw_wr_idx + 1) % XOR_SW_FDESC_COUNT ;
+
+	sw_xor_desc[desc_idx].async_tx.flags = xor_flags;
+	sw_xor_desc[desc_idx].src_cnt = src_cnt;
+	sw_xor_desc[desc_idx].len = len;
+	sw_xor_desc[desc_idx].dma_dest = dest;
+	for(i = 0; i < src_cnt; i++)
+		sw_xor_desc[desc_idx].dma_src[i] = srcs[i];
+
+	spin_unlock_bh(&comcerto_xor_ch.lock);
+
+	return &sw_xor_desc[desc_idx].async_tx;
+
+}
+
+
+static int comcerto_xor_alloc_chan_resources(struct dma_chan *chan)
+{
+	int i;
+
+	for(i = 0; i < XOR_SW_FDESC_COUNT; i++) {
+		memset(&sw_xor_desc[i], 0, sizeof(struct comcerto_sw_xor_desc));
+		sw_xor_desc[i].async_tx.tx_submit = comcerto_xor_tx_submit;
+		sw_xor_desc[i].async_tx.cookie = 0;
+		dma_async_tx_descriptor_init(&sw_xor_desc[i].async_tx, chan);
+	}
+	return XOR_SW_FDESC_COUNT;
+}
+
+static void comcerto_xor_free_chan_resources(struct dma_chan *chan)
+{
+	//TODO
+
+	printk("*** %s ***\n",__func__);
+
+	return;
+}
+
+static enum dma_status comcerto_xor_status(struct dma_chan *chan,
+		dma_cookie_t cookie,
+		struct dma_tx_state *txstate)
+{
+	dma_cookie_t last_used;
+	dma_cookie_t last_complete;
+
+	last_used = chan->cookie;
+	last_complete = comcerto_xor_ch.completed_cookie;
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+	return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
 
 #if defined(CONFIG_COMCERTO_MDMA_PROF)
 
@@ -112,9 +538,9 @@
 #endif
 
 
-static inline dma_addr_t dma_acp_map_page(struct comcerto_dma_sg *sg, struct page *page, unsigned int offset, unsigned int len, int dir, int use_acp)
+static inline dma_addr_t dma_acp_map_page(struct comcerto_dma_sg *sg, void *p, unsigned int len, int dir, int use_acp)
 {
-	dma_addr_t phys_addr = page_to_phys(page) + offset;
+	dma_addr_t phys_addr = virt_to_phys(p);
 	dma_addr_t low, high;
 
 	if (!use_acp)
@@ -145,11 +571,11 @@
 	}
 
 map:
-	return dma_map_page(NULL, page, offset, len, dir); //TODO add proper checks
+	return dma_map_single(NULL, p, len, dir); //TODO add proper checks
 }
 
 
-int comcerto_dma_sg_add_input(struct comcerto_dma_sg *sg, struct page *page, unsigned int offset, unsigned int len, int use_acp)
+int comcerto_dma_sg_add_input(struct comcerto_dma_sg *sg, void *p, unsigned int len, int use_acp)
 {
 	dma_addr_t phys_addr;
 
@@ -162,7 +588,7 @@
 		if (sg->input_idx >= MDMA_INBOUND_BUF_DESC)
 			return -1;
 
-		phys_addr = dma_acp_map_page(sg, page, offset, len, DMA_TO_DEVICE, use_acp);
+		phys_addr = dma_acp_map_page(sg, p, len, DMA_TO_DEVICE, use_acp);
 
 		sg->in_bdesc[sg->input_idx].split = 0;
 		sg->in_bdesc[sg->input_idx].phys_addr = phys_addr;
@@ -175,7 +601,7 @@
 		if (sg->input_idx >= (MDMA_INBOUND_BUF_DESC - 1))
 			return -1;
 
-		phys_addr = dma_acp_map_page(sg, page, offset, len, DMA_TO_DEVICE, use_acp);
+		phys_addr = dma_acp_map_page(sg, p, len, DMA_TO_DEVICE, use_acp);
 
 		sg->in_bdesc[sg->input_idx].split = 1;
 		sg->in_bdesc[sg->input_idx].phys_addr = phys_addr;
@@ -183,7 +609,7 @@
 		sg->input_idx++;
 		sg->in_bdesc[sg->input_idx].split = 0;
 		sg->in_bdesc[sg->input_idx].phys_addr = phys_addr + MDMA_SPLIT_BUF_SIZE;
-		sg->in_bdesc[sg->input_idx].len = MDMA_SPLIT_BUF_SIZE;
+		sg->in_bdesc[sg->input_idx].len = len - MDMA_SPLIT_BUF_SIZE;
 		sg->input_idx++;
 
 		return 0;
@@ -191,7 +617,7 @@
 }
 EXPORT_SYMBOL(comcerto_dma_sg_add_input);
 
-int comcerto_dma_sg_add_output(struct comcerto_dma_sg *sg, struct page *page, unsigned int offset, unsigned int len, int use_acp)
+int comcerto_dma_sg_add_output(struct comcerto_dma_sg *sg, void *p, unsigned int len, int use_acp)
 {
 	dma_addr_t phys_addr;
 
@@ -204,7 +630,7 @@
 		if (sg->output_idx >= MDMA_OUTBOUND_BUF_DESC)
 			return -1;
 
-		phys_addr = dma_acp_map_page(sg, page, offset, len, DMA_FROM_DEVICE, use_acp);
+		phys_addr = dma_acp_map_page(sg, p, len, DMA_FROM_DEVICE, use_acp);
 
 		sg->out_bdesc[sg->output_idx].split = 0;
 		sg->out_bdesc[sg->output_idx].phys_addr = phys_addr;
@@ -217,7 +643,7 @@
 		if (sg->output_idx >= (MDMA_OUTBOUND_BUF_DESC - 1))
 			return -1;
 
-		phys_addr = dma_acp_map_page(sg, page, offset, len, DMA_FROM_DEVICE, use_acp);
+		phys_addr = dma_acp_map_page(sg, p, len, DMA_FROM_DEVICE, use_acp);
 
 		sg->out_bdesc[sg->output_idx].split = 1;
 		sg->out_bdesc[sg->output_idx].phys_addr = phys_addr;
@@ -225,7 +651,7 @@
 		sg->output_idx++;
 		sg->out_bdesc[sg->output_idx].split = 0;
 		sg->out_bdesc[sg->output_idx].phys_addr = phys_addr + MDMA_SPLIT_BUF_SIZE;
-		sg->out_bdesc[sg->output_idx].len = MDMA_SPLIT_BUF_SIZE;
+		sg->out_bdesc[sg->output_idx].len = len - MDMA_SPLIT_BUF_SIZE;
 		sg->output_idx++;
 
 		return 0;
@@ -242,7 +668,7 @@
 
 	writel_relaxed(sg->low_phys_addr |
 			AWUSER_COHERENT(WRITEBACK) | AWPROT(0x0) | AWCACHE(CACHEABLE | BUFFERABLE) |
-                        ARUSER_COHERENT(WRITEBACK) | ARPROT(0x0) | ARCACHE(CACHEABLE | BUFFERABLE),
+			ARUSER_COHERENT(WRITEBACK) | ARPROT(0x0) | ARCACHE(CACHEABLE | BUFFERABLE),
 			COMCERTO_GPIO_A9_ACP_CONF_REG);
 
 	remaining = len;
@@ -334,20 +760,23 @@
 
 	spin_lock_irqsave(&mdma_lock, flags);
 
-	if (mdma_busy) {
-		prepare_to_wait(&mdma_busy_queue, &wait, TASK_UNINTERRUPTIBLE);
+	if (dma_owned && (dma_owned != OWNER_MEMCPY_FREE)) {
+		prepare_to_wait(&mdma_memcpy_busy_queue, &wait, TASK_UNINTERRUPTIBLE);
 
-		while (mdma_busy) {
+		memcpy_pending_count++;
+
+		while (!dma_owned || !(dma_owned == OWNER_MEMCPY_FREE)) {
 			spin_unlock_irqrestore(&mdma_lock, flags);
 			schedule();
 			spin_lock_irqsave(&mdma_lock, flags);
-			prepare_to_wait(&mdma_busy_queue, &wait, TASK_UNINTERRUPTIBLE);
+			prepare_to_wait(&mdma_memcpy_busy_queue, &wait, TASK_UNINTERRUPTIBLE);
 		}
 
-		finish_wait(&mdma_busy_queue, &wait);
-	}
+		memcpy_pending_count--;
 
-	mdma_busy = 1;
+		finish_wait(&mdma_memcpy_busy_queue, &wait);
+	}
+	dma_owned = OWNER_MEMCPY_BUSY;
 
 	spin_unlock_irqrestore(&mdma_lock, flags);
 }
@@ -355,13 +784,15 @@
 
 void comcerto_dma_put(void)
 {
+#if 0
 	unsigned long flags;
 
 	spin_lock_irqsave(&mdma_lock, flags);
 	mdma_busy = 0;
 	spin_unlock_irqrestore(&mdma_lock, flags);
 
-	wake_up(&mdma_busy_queue);
+	wake_up(&mdma_memcpy_busy_queue);
+#endif
 }
 EXPORT_SYMBOL(comcerto_dma_put);
 
@@ -369,13 +800,13 @@
 static void comcerto_dma_setup(void)
 {
 	/* IO2M_IRQ_ENABLE: Enable IRQ_IRQFDON*/
-	writel_relaxed(IRQ_IRQFDON, IO2M_IRQ_ENABLE);
+	writel_relaxed(IRQ_IRQFDON|IRQ_IRQFLEN|IRQ_IRQFTHLD|IRQ_IRQFLST, IO2M_IRQ_ENABLE);
+	writel_relaxed(IRQ_IRQFDON|IRQ_IRQFLEN|IRQ_IRQFTHLD|IRQ_IRQFLST, M2IO_IRQ_ENABLE);
 
-	writel_relaxed(0x0, M2IO_CONTROL);
-	writel_relaxed(0xf, M2IO_BURST);
-
-	writel_relaxed(0x0, IO2M_CONTROL);
-	writel_relaxed(0xf, IO2M_BURST);
+	writel_relaxed(FLENEN, M2IO_CONTROL);
+	writel_relaxed(0xf | (0x3ff << 8), M2IO_BURST);
+	writel_relaxed(FLENEN, IO2M_CONTROL);
+	writel_relaxed(0xf | (0x3ff << 8), IO2M_BURST);
 }
 
 
@@ -402,6 +833,9 @@
 	// Initialize the Inbound Head Pointer
 	writel_relaxed(mdma_in_desc_phy, M2IO_HEAD);
 
+	writel_relaxed(1, M2IO_FLEN);
+	writel_relaxed(1, IO2M_FLEN);
+
 	wmb();
 }
 EXPORT_SYMBOL(comcerto_dma_start);
@@ -469,6 +903,9 @@
 
 static irqreturn_t c2k_dma_handle_interrupt(int irq, void *data)
 {
+	int i;
+	int pending_count;
+	unsigned long flags;
 	u32 intr_cause = readl_relaxed(IO2M_IRQ_STATUS);
 
 	writel_relaxed(intr_cause, IO2M_IRQ_STATUS);
@@ -490,11 +927,89 @@
 	}
 
 	if (intr_cause & IRQ_IRQFDON) {
-		if (unlikely(!(mdma_out_desc->fstatus1 & FDONE_MASK)))
-			printk(KERN_INFO "Fdesc not done\n");
 
-		mdma_done = 1;
-		wake_up(&mdma_done_queue);
+	}
+
+	if (intr_cause & IRQ_IRQFLEN) {
+
+		spin_lock_irqsave(&mdma_lock, flags);
+		if(!dma_owned)
+			printk(KERN_ALERT " NULL MDMA Ownership !!!\n");
+
+
+		if(dma_owned==OWNER_XOR_BUSY)
+		{
+
+			for(i = 0 ; i < xor_current_batch_count; i++)
+				xor_dma_idx = (xor_dma_idx + 1) % XOR_FDESC_COUNT;
+
+			xor_current_batch_count = 0;
+
+
+			if(memcpy_pending_count)
+			{
+				dma_owned = OWNER_MEMCPY_FREE;
+				wake_up(&mdma_memcpy_busy_queue);
+			}
+			else
+			{
+				pending_count = CIRC_CNT(xor_wr_idx, xor_dma_idx, XOR_FDESC_COUNT);
+				if(pending_count)
+				{
+					dma_owned = OWNER_XOR_BUSY;
+					xor_current_batch_count = pending_count;
+					comcerto_xor_update_dma_flen(xor_current_batch_count);
+					comcerto_xor_update_dma_head(xor_dma_idx);
+				}
+				else
+				{
+					dma_owned = 0;
+				}
+			}
+
+			if(comcerto_xor_sleeping)
+			{
+				wake_up(&comcerto_xor_wait_queue);
+			}
+
+			tasklet_schedule(&comcerto_xor_ch.irq_tasklet);
+		}
+		else //memcpy
+		{
+			mdma_done = 1;
+			wake_up(&mdma_done_queue);
+
+			memcpy_processed_ongoing++;
+			pending_count = CIRC_CNT(xor_wr_idx, xor_dma_idx, XOR_FDESC_COUNT);
+
+			if(pending_count)
+			{
+				memcpy_processed_ongoing = 0;
+				dma_owned = OWNER_XOR_BUSY;
+				xor_current_batch_count = pending_count;
+				comcerto_xor_update_dma_flen(xor_current_batch_count);
+				comcerto_xor_update_dma_head(xor_dma_idx);
+
+				if(comcerto_xor_sleeping)
+				{
+					wake_up(&comcerto_xor_wait_queue);
+				}
+			}
+			else
+			{
+				if(memcpy_pending_count)
+				{
+					dma_owned = OWNER_MEMCPY_FREE;
+					wake_up(&mdma_memcpy_busy_queue);
+				}
+				else
+				{
+					dma_owned = 0;
+				}
+			}
+
+		}
+		spin_unlock_irqrestore(&mdma_lock, flags);
 	}
 
 	return IRQ_HANDLED;
@@ -519,8 +1034,11 @@
 {
 	struct resource      *io;
 	int                  irq;
-	void *aram_pool = (void *)IRAM_MEMORY_VADDR;
-	int ret;
+	void *memcpy_pool = (void *)IRAM_MEMORY_VADDR;
+	void *xor_pool;
+	int i;
+	struct dma_device    *dma_dev;
+	int ret = 0;
 
 	/* Retrieve related resources(mem, irq) from platform_device */
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -533,32 +1051,104 @@
 
 	ret = request_irq(irq, c2k_dma_handle_interrupt, 0, "MDMA", NULL);
 	if (ret < 0)
-		goto err_irq;
+		return ret;
 
 	virtbase = ioremap(io->start, resource_size(io));
 	if (!virtbase)
-		goto err_ioremap;
+		goto err_free_irq;
+
+	/* Initialize comcerto_xor_device */
+	comcerto_xor_dev = devm_kzalloc(&pdev->dev, sizeof(struct comcerto_xor_device), GFP_KERNEL);
+
+	if(!comcerto_xor_dev)
+	{
+		ret = -ENOMEM;
+		goto err_free_remap;
+	}
+
+	dma_dev = &comcerto_xor_dev->device;
+	INIT_LIST_HEAD(&dma_dev->channels);
+	dma_cap_set(DMA_XOR,dma_dev->cap_mask);
+	dma_dev->dev = &pdev->dev;
+	dma_dev->device_alloc_chan_resources = comcerto_xor_alloc_chan_resources;
+	dma_dev->device_free_chan_resources  = comcerto_xor_free_chan_resources;
+	dma_dev->device_tx_status            = comcerto_xor_status;
+	dma_dev->device_issue_pending        = comcerto_xor_issue_pending;
+	dma_dev->device_prep_dma_xor         = comcerto_xor_prep_dma_xor;
+	dma_dev->max_xor = XOR_MAX_SRC_CNT;
+	platform_set_drvdata(pdev,comcerto_xor_dev);
+
+	/* Initialize comcerto_xor_chan */
+	comcerto_xor_ch.chan.device = dma_dev;
+	list_add_tail(&comcerto_xor_ch.chan.device_node,&dma_dev->channels);
+
+	ret = dma_async_device_register(dma_dev);
+	if (unlikely(ret)) {
+		printk(KERN_ERR "%s: Failed to register XOR DMA channel %d\n",__func__,ret);
+		goto err_free_dma;
+	}
+	else
+		printk(KERN_INFO "%s: XOR DMA channel registered\n",__func__);
+
+	spin_lock_init(&comcerto_xor_ch.lock);
+	spin_lock_init(&comcerto_xor_cleanup_lock);
 
 	spin_lock_init(&mdma_lock);
 
-	//initializing
-	mdma_in_desc = (struct comcerto_xor_inbound_fdesc *) (aram_pool);
-	aram_pool += sizeof(struct comcerto_xor_inbound_fdesc);
-	aram_pool = (void *)((unsigned long)(aram_pool + 15) & ~15);
-	mdma_out_desc = (struct comcerto_xor_outbound_fdesc *) (aram_pool);
+	//Memcpy descriptor initializing
+	mdma_in_desc = (struct comcerto_memcpy_inbound_fdesc *) (memcpy_pool);
+	memcpy_pool += sizeof(struct comcerto_memcpy_inbound_fdesc);
+	memcpy_pool = (void *)((unsigned long)(memcpy_pool + 15) & ~15);
+	mdma_out_desc = (struct comcerto_memcpy_outbound_fdesc *) (memcpy_pool);
+	memcpy_pool += sizeof(struct comcerto_memcpy_outbound_fdesc);
+	memcpy_pool = (void *)((unsigned long)(memcpy_pool + 15) & ~15);
 
 	mdma_in_desc_phy = virt_to_aram(mdma_in_desc);
 	mdma_out_desc_phy = virt_to_aram(mdma_out_desc);
 
+	//XOR descriptor initializing
+	comcerto_xor_pool_virt = dma_alloc_coherent(NULL, XOR_FDESC_COUNT * (sizeof(struct comcerto_xor_inbound_fdesc) 
+				+  sizeof(struct comcerto_xor_outbound_fdesc)), &comcerto_xor_pool_phy, GFP_KERNEL);
+	xor_pool = comcerto_xor_pool_virt;
+
+	for (i = 0; i < XOR_FDESC_COUNT; i++) {
+		xor_in_fdesc[i] = (struct comcerto_xor_inbound_fdesc *) (xor_pool);
+		xor_pool += sizeof(struct comcerto_xor_inbound_fdesc);
+		xor_pool = (void *)((unsigned long)(xor_pool + 15) & ~15);
+		xor_out_fdesc[i] = (struct comcerto_xor_outbound_fdesc *) (xor_pool);
+		xor_pool += sizeof(struct comcerto_xor_outbound_fdesc);
+		xor_pool = (void *)((unsigned long)(xor_pool + 15) & ~15);
+
+		memset(xor_in_fdesc[i], 0 , sizeof(struct comcerto_xor_inbound_fdesc));
+		memset(xor_out_fdesc[i], 0 , sizeof(struct comcerto_xor_outbound_fdesc));
+	}
+
+	for(i = 0; i < XOR_FDESC_COUNT - 1; i++) {
+		xor_in_fdesc[i]->next_desc = virt_to_xor_dma(comcerto_xor_pool_phy, comcerto_xor_pool_virt, xor_in_fdesc[i+1]);
+		xor_out_fdesc[i]->next_desc = virt_to_xor_dma(comcerto_xor_pool_phy, comcerto_xor_pool_virt, xor_out_fdesc[i+1]);
+	}
+	xor_in_fdesc[XOR_FDESC_COUNT-1]->next_desc = virt_to_xor_dma(comcerto_xor_pool_phy, comcerto_xor_pool_virt, xor_in_fdesc[0]);
+	xor_out_fdesc[XOR_FDESC_COUNT-1]->next_desc = virt_to_xor_dma(comcerto_xor_pool_phy, comcerto_xor_pool_virt, xor_out_fdesc[0]);
+
+	init_timer(&comcerto_xor_timer);
+	comcerto_xor_timer.function = comcerto_xor_timer_fnc;
+	comcerto_xor_timer.data = 0;
+
+	tasklet_init(&comcerto_xor_ch.irq_tasklet, comcerto_xor_tasklet, 0);
+
 	comcerto_dma_setup();
 
-	return 0;
+	goto out;
 
-err_ioremap:
+err_free_dma:
+	platform_set_drvdata(pdev,NULL);
+	kfree(comcerto_xor_dev);
+err_free_remap:
+	iounmap(virtbase);	
+err_free_irq:
 	free_irq(irq, NULL);
-
-err_irq:
-	return -1;
+out:
+	return ret;
 }
 
 
@@ -566,8 +1156,8 @@
 	.probe        = comcerto_dma_probe,
 	.remove       = comcerto_dma_remove,
 	.driver       = {
-			.owner = THIS_MODULE,
-			.name  = "comcerto_dma",
+		.owner = THIS_MODULE,
+		.name  = "comcerto_dma",
 	},
 };
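The c2k_dma changes above drive the MDMA hardware through a ring of XOR_FDESC_COUNT frame descriptors, with xor_wr_idx advanced at submit time, xor_dma_idx advanced by the IRQ handler, and xor_rd_idx advanced during cleanup, all index arithmetic done with the kernel's circ_buf helpers. A minimal sketch of that index discipline (hypothetical names, not part of the patch; assumes a power-of-two ring size, as XOR_FDESC_COUNT is):

#include <linux/circ_buf.h>

#define RING_SIZE	512	/* power of two, like XOR_FDESC_COUNT */

/* Producer: claim the next free slot, as comcerto_xor_set_desc() does
 * before advancing xor_wr_idx. */
static int ring_claim(int *wr_idx, int rd_idx)
{
	if (!CIRC_SPACE(*wr_idx, rd_idx, RING_SIZE))
		return -1;			/* ring full, caller must wait */

	*wr_idx = (*wr_idx + 1) % RING_SIZE;
	return 0;
}

/* Consumer: retire one completed slot, as comcerto_xor_cleanup() does
 * when advancing xor_rd_idx behind xor_dma_idx. */
static int ring_retire(int dma_idx, int *rd_idx)
{
	if (!CIRC_CNT(dma_idx, *rd_idx, RING_SIZE))
		return -1;			/* nothing completed yet */

	*rd_idx = (*rd_idx + 1) % RING_SIZE;
	return 0;
}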
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 58d8c6d..5040ce1 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -29,6 +29,16 @@
 
 #include <linux/device-mapper.h>
 
+#if defined(CONFIG_OCF_DM_CRYPT)
+#undef DM_DEBUG
+#ifdef DM_DEBUG
+#define dmprintk printk
+#else
+#define dmprintk(fmt,args...)
+#endif
+#include <linux/cryptodev.h>
+#endif /* CONFIG_OCF_DM_CRYPT */
+
 #define DM_MSG_PREFIX "crypt"
 
 /*
@@ -146,6 +156,10 @@
 	sector_t iv_offset;
 	unsigned int iv_size;
 
+#if defined(CONFIG_OCF_DM_CRYPT)
+	struct cryptoini	cr_dm;	/* OCF session */
+	uint64_t	ocf_cryptoid;	/* OCF session ID */
+#endif
 	/*
 	 * Duplicated per cpu state. Access through
 	 * per_cpu_ptr() only.
@@ -269,7 +283,7 @@
 		return err;
 
 	for_each_possible_cpu(cpu) {
-		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private,
+		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
 
 		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
 				    crypto_hash_digestsize(essiv->hash_tfm));
@@ -305,7 +319,7 @@
 					     struct dm_target *ti,
 					     u8 *salt, unsigned saltsize)
 {
-	struct crypto_cipher *essiv_tfm;
+	struct crypto_cipher *essiv_tfm = NULL;
 	int err;
 
 	/* Setup the essiv_tfm with the given salt */
@@ -315,8 +329,12 @@
 		return essiv_tfm;
 	}
 
+#if  defined(CONFIG_OCF_DM_CRYPT)
+	if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
+#else
 	if (crypto_cipher_blocksize(essiv_tfm) !=
 	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
+#endif
 		ti->error = "Block size of ESSIV cipher does "
 			    "not match IV size of block cipher";
 		crypto_free_cipher(essiv_tfm);
@@ -419,6 +437,7 @@
 	return 0;
 }
 
+#if !defined(CONFIG_OCF_DM_CRYPT)
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
 			      const char *opts)
 {
@@ -459,6 +478,7 @@
 
 	return 0;
 }
+#endif
 
 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
 			     struct dm_crypt_request *dmreq)
@@ -634,11 +654,13 @@
 	.generator = crypt_iv_essiv_gen
 };
 
+#if !defined(CONFIG_OCF_DM_CRYPT)
 static struct crypt_iv_operations crypt_iv_benbi_ops = {
 	.ctr	   = crypt_iv_benbi_ctr,
 	.dtr	   = crypt_iv_benbi_dtr,
 	.generator = crypt_iv_benbi_gen
 };
+#endif
 
 static struct crypt_iv_operations crypt_iv_null_ops = {
 	.generator = crypt_iv_null_gen
@@ -687,6 +709,275 @@
 		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
 }
 
+static void kcryptd_crypt_read_done(struct dm_crypt_io *io);
+static void crypt_alloc_req(struct crypt_config *cc,
+			    struct convert_context *ctx);
+static void crypt_inc_pending(struct dm_crypt_io *io);
+static void crypt_dec_pending(struct dm_crypt_io *io);
+
+#if defined(CONFIG_OCF_DM_CRYPT)
+struct ocf_wr_priv {
+	u32			dm_ocf_wr_completed;	/* Num of wr completions */
+	u32			dm_ocf_wr_pending;	/* Num of wr pendings */
+	wait_queue_head_t	dm_ocf_wr_queue;	/* waiting Q, for wr completion */
+};
+
+struct ocf_rd_priv {
+	u32			dm_ocf_rd_completed;	/* Num of rd completions */
+	u32			dm_ocf_rd_pending;	/* Num of rd pendings */
+	wait_queue_head_t	dm_ocf_rd_queue;	/* waiting Q, for rd completion */
+};
+
+static int dm_ocf_wr_cb(struct cryptop *crp)
+{
+	struct ocf_wr_priv *ocf_wr_priv;
+
+	if(crp == NULL) {
+		printk("dm_ocf_wr_cb: crp is NULL!! \n");
+		return 0;
+	}
+
+	ocf_wr_priv = (struct ocf_wr_priv*)crp->crp_opaque;
+
+	ocf_wr_priv->dm_ocf_wr_completed++;
+
+	/* if no more pending writes, wake up the waiting write task. */
+	if(ocf_wr_priv->dm_ocf_wr_completed == ocf_wr_priv->dm_ocf_wr_pending)
+		wake_up(&ocf_wr_priv->dm_ocf_wr_queue);
+
+	crypto_freereq(crp);
+	return 0;
+}
+
+static int dm_ocf_rd_cb(struct cryptop *crp)
+{
+	struct ocf_rd_priv *ocf_rd_priv;
+
+	if(crp == NULL) {
+		printk("dm_ocf_rd_cb: crp is NULL!! \n");
+		return 0;
+	}
+
+	ocf_rd_priv = (struct ocf_rd_priv*)crp->crp_opaque;
+
+	ocf_rd_priv->dm_ocf_rd_completed++;
+
+	/* if no more pending reads, wake up the waiting read task. */
+	if(ocf_rd_priv->dm_ocf_rd_completed == ocf_rd_priv->dm_ocf_rd_pending)
+		wake_up(&ocf_rd_priv->dm_ocf_rd_queue);
+
+	crypto_freereq(crp);
+	return 0;
+}
+
+static inline int dm_ocf_process(struct crypt_config *cc, struct scatterlist *out,
+		struct scatterlist *in, unsigned int len, u8 *iv, int iv_size, int write, void *priv)
+{
+	struct cryptop *crp;
+	struct cryptodesc *crda = NULL;
+	struct page *in_page = sg_page(in);
+	struct page *out_page = sg_page(out);
+	int r=0;
+
+	if(!iv) {
+		printk("dm_ocf_process: only CBC mode is supported\n");
+		return -EPERM;
+	}
+
+	crp = crypto_getreq(1);  /* only encryption/decryption */
+	if (!crp) {
+		printk("\ndm_ocf_process: crypto_getreq failed!!\n");
+		return -ENOMEM;
+	}
+
+	crda = crp->crp_desc;
+
+	crda->crd_flags  = (write)? CRD_F_ENCRYPT: 0;
+	crda->crd_alg    = cc->cr_dm.cri_alg;
+	crda->crd_skip   = 0;
+	crda->crd_len    = len;
+	crda->crd_inject = 0; /* NA */
+	crda->crd_klen   = cc->cr_dm.cri_klen;
+	crda->crd_key    = cc->cr_dm.cri_key;
+
+	if (iv) {
+		//crda->crd_flags |= (CRD_F_IV_PRESENT);
+		crda->crd_flags |= (CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT);
+		if( iv_size > EALG_MAX_BLOCK_LEN ) {
+			printk("dm_ocf_process: iv is too big!!\n");
+		}
+		memcpy(&crda->crd_iv, iv, iv_size);
+	}
+
+
+	//dmprintk("len: %d\n",len);
+	crp->crp_ilen = len; /* Total input length */
+	//crp->crp_flags = CRYPTO_F_CBIMM;
+	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_BATCH;
+	crp->crp_buf = page_address(in_page) + in->offset;
+	crp->crp_out_buf = page_address(out_page) + out->offset;
+	crp->crp_opaque = priv;
+	if(write) {
+		crp->crp_callback = dm_ocf_wr_cb;
+	}
+	else {
+		crp->crp_callback = dm_ocf_rd_cb;
+	}
+	crp->crp_sid = cc->ocf_cryptoid;
+	r = crypto_dispatch(crp);
+	if (r == ENOMEM) {
+		dmprintk("crypto_dispatch returned ENOMEM \n");
+	}
+	return r;
+}
+
+static int ocf_crypt_convert_block(struct crypt_config *cc,
+				struct convert_context *ctx,
+				struct ablkcipher_request *req,
+				int write, void *priv)
+{
+	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
+	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+	struct dm_crypt_request *dmreq;
+	u8 iv[cc->iv_size];
+	int r = 0;
+
+	dmreq = dmreq_of_req(cc, req);
+	dmreq->iv_sector = ctx->sector;
+	dmreq->ctx = ctx;
+	sg_init_table(&dmreq->sg_in, 1);
+	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
+		    bv_in->bv_offset + ctx->offset_in);
+
+	sg_init_table(&dmreq->sg_out, 1);
+	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
+		    bv_out->bv_offset + ctx->offset_out);
+
+	ctx->offset_in += 1 << SECTOR_SHIFT;
+	if (ctx->offset_in >= bv_in->bv_len) {
+		ctx->offset_in = 0;
+		ctx->idx_in++;
+	}
+
+	ctx->offset_out += 1 << SECTOR_SHIFT;
+	if (ctx->offset_out >= bv_out->bv_len) {
+		ctx->offset_out = 0;
+		ctx->idx_out++;
+	}
+
+	if (cc->iv_gen_ops) {
+		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
+		if (r < 0)
+			return r;
+		r = dm_ocf_process(cc, &dmreq->sg_out, &dmreq->sg_in, dmreq->sg_in.length, iv, cc->iv_size, write, priv);
+	} else {
+		r = dm_ocf_process(cc, &dmreq->sg_in, &dmreq->sg_in, dmreq->sg_in.length, NULL, 0, write, priv);
+	}
+
+	return r;
+}
+
+/*
+ * Encrypt / decrypt data from one bio to another one (can be the same one)
+ */
+static int ocf_crypt_convert(struct crypt_config *cc,
+			 struct convert_context *ctx, struct dm_crypt_io *io)
+{
+	int r = 0;
+	long wr_timeout = 300000;
+	long rd_timeout = 300000;
+	long wr_tm, rd_tm;
+	int num = 0, num1 =0;
+	void *priv = NULL;
+	struct ocf_wr_priv *ocf_wr_priv = NULL;
+	struct ocf_rd_priv *ocf_rd_priv = NULL;
+	int write=0;
+	struct crypt_cpu *this_cc = this_crypt_config(cc);
+
+	atomic_set(&ctx->pending, 1);
+
+
+	if(bio_data_dir(ctx->bio_in) == WRITE) {
+		write = 1;
+		ocf_wr_priv = kmalloc(sizeof(struct ocf_wr_priv),GFP_KERNEL);
+		if(!ocf_wr_priv) {
+			printk("ocf_crypt_convert: out of memory \n");
+			return -ENOMEM;
+		}
+		ocf_wr_priv->dm_ocf_wr_pending = 0;
+		ocf_wr_priv->dm_ocf_wr_completed = 0;
+		init_waitqueue_head(&ocf_wr_priv->dm_ocf_wr_queue);
+		priv = ocf_wr_priv;
+	}
+	else {
+		ocf_rd_priv = kmalloc(sizeof(struct ocf_rd_priv),GFP_KERNEL);
+		if(!ocf_rd_priv) {
+			printk("ocf_crypt_convert: out of memory \n");
+			return -ENOMEM;
+		}
+		ocf_rd_priv->dm_ocf_rd_pending = 0;
+		ocf_rd_priv->dm_ocf_rd_completed = 0;
+		init_waitqueue_head(&ocf_rd_priv->dm_ocf_rd_queue);
+		priv = ocf_rd_priv;
+	}
+
+	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
+	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
+
+		crypt_alloc_req(cc, ctx);
+
+		atomic_inc(&ctx->pending);
+
+		if(bio_data_dir(ctx->bio_in) == WRITE) {
+			num++;
+		}
+		/* if last read in the context - send the io, so the OCF read callback will release the IO. */
+		else {
+			num1++;
+		}
+
+		r = ocf_crypt_convert_block(cc, ctx, this_cc->req, write, priv);
+
+		switch (r) {
+			case 0:
+				atomic_dec(&ctx->pending);
+				ctx->sector++;
+				//cond_resched();
+				continue;
+			/* error */
+			default:
+				atomic_dec(&ctx->pending);
+				//return r;
+		}
+	}
+
+	if(bio_data_dir(ctx->bio_in) == WRITE) {
+		ocf_wr_priv->dm_ocf_wr_pending += num;
+		wr_tm = wait_event_timeout(ocf_wr_priv->dm_ocf_wr_queue,
+					(ocf_wr_priv->dm_ocf_wr_pending == ocf_wr_priv->dm_ocf_wr_completed)
+										, msecs_to_jiffies(wr_timeout) );
+		if (!wr_tm) {
+			printk("ocf_crypt_convert: wr work was not finished in %ld msecs, %d pending %d completed.\n",
+					wr_timeout, ocf_wr_priv->dm_ocf_wr_pending, ocf_wr_priv->dm_ocf_wr_completed);
+		}
+		kfree(ocf_wr_priv);
+	}
+	else {
+		ocf_rd_priv->dm_ocf_rd_pending += num1;
+		rd_tm = wait_event_timeout(ocf_rd_priv->dm_ocf_rd_queue,
+					(ocf_rd_priv->dm_ocf_rd_pending == ocf_rd_priv->dm_ocf_rd_completed)
+										, msecs_to_jiffies(rd_timeout) );
+		if (!rd_tm) {
+			printk("ocf_crypt_convert: rd work was not finished in %ld msecs, %d pending %d completed.\n",
+					rd_timeout, ocf_rd_priv->dm_ocf_rd_pending, ocf_rd_priv->dm_ocf_rd_completed);
+		}
+		kfree(ocf_rd_priv);
+	}
+
+	return r;
+}
+#endif /*CONFIG_OCF_DM_CRYPT*/
+
 static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
@@ -749,15 +1040,19 @@
 			    struct convert_context *ctx)
 {
 	struct crypt_cpu *this_cc = this_crypt_config(cc);
+#if !defined(CONFIG_OCF_DM_CRYPT)
 	unsigned key_index = ctx->sector & (cc->tfms_count - 1);
+#endif
 
 	if (!this_cc->req)
 		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
+#if !defined(CONFIG_OCF_DM_CRYPT)
 	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
 	ablkcipher_request_set_callback(this_cc->req,
 	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
 	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+#endif
 }
 
 /*
@@ -1102,10 +1397,18 @@
 		sector += bio_sectors(clone);
 
 		crypt_inc_pending(io);
+#if defined(CONFIG_OCF_DM_CRYPT)
+		r = ocf_crypt_convert(cc, &io->ctx, io);
 
+		if(r < 0) {
+			printk("\n%s() ocf_crypt_convert failed\n",__FUNCTION__);
+			io->error = -EIO;
+		}
+#else
 		r = crypt_convert(cc, &io->ctx);
 		if (r < 0)
 			io->error = -EIO;
+#endif
 
 		crypt_finished = atomic_dec_and_test(&io->ctx.pending);
 
@@ -1177,9 +1480,17 @@
 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
 			   io->sector);
 
+#if defined(CONFIG_OCF_DM_CRYPT)
+	r = ocf_crypt_convert(cc, &io->ctx, io);
+	if(r < 0) {
+		printk("\n%s() ocf_crypt_convert failed\n",__FUNCTION__);
+		io->error = -EIO;
+	}
+#else
 	r = crypt_convert(cc, &io->ctx);
 	if (r < 0)
 		io->error = -EIO;
+#endif
 
 	if (atomic_dec_and_test(&io->ctx.pending))
 		kcryptd_crypt_read_done(io);
@@ -1341,7 +1652,11 @@
 
 	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
+#if !defined(CONFIG_OCF_DM_CRYPT)
 	r = crypt_setkey_allcpus(cc);
+#else
+	r = 0;
+#endif
 
 out:
 	/* Hex key string not needed after here, so wipe it. */
@@ -1355,7 +1670,11 @@
 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 	memset(&cc->key, 0, cc->key_size * sizeof(u8));
 
+#if !defined(CONFIG_OCF_DM_CRYPT)
 	return crypt_setkey_allcpus(cc);
+#else
+	return 0;
+#endif
 }
 
 static void crypt_dtr(struct dm_target *ti)
@@ -1379,8 +1698,14 @@
 			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
 			if (cpu_cc->req)
 				mempool_free(cpu_cc->req, cc->req_pool);
+#if !defined(CONFIG_OCF_DM_CRYPT)
 			crypt_free_tfms(cc, cpu);
+#endif
 		}
+#if defined(CONFIG_OCF_DM_CRYPT)
+	if(cc->ocf_cryptoid)
+		crypto_freesession(cc->ocf_cryptoid);
+#endif
 
 	if (cc->bs)
 		bioset_free(cc->bs);
@@ -1414,7 +1739,11 @@
 	struct crypt_config *cc = ti->private;
 	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
 	char *cipher_api = NULL;
+#if !defined(CONFIG_OCF_DM_CRYPT)
 	int cpu, ret = -EINVAL;
+#else
+	int ret = -EINVAL;
+#endif
 
 	/* Convert to crypto api definition? */
 	if (strchr(cipher_in, '(')) {
@@ -1487,6 +1816,40 @@
 		goto bad_mem;
 	}
 
+	/* Initialize and set key */
+	ret = crypt_set_key(cc, key);
+	if (ret < 0) {
+		ti->error = "Error decoding and setting key";
+		goto bad;
+	}
+
+#if defined(CONFIG_OCF_DM_CRYPT)
+	/* prepare a new OCF session */
+	memset(&cc->cr_dm, 0, sizeof(struct cryptoini));
+
+	if((strcmp(cipher,"aes") == 0) && (strcmp(chainmode, "cbc") == 0))
+		cc->cr_dm.cri_alg  = CRYPTO_AES_CBC;
+	else if((strcmp(cipher,"des") == 0) && (strcmp(chainmode, "cbc") == 0))
+		cc->cr_dm.cri_alg  = CRYPTO_DES_CBC;
+	else if((strcmp(cipher,"des3_ede") == 0) && (strcmp(chainmode, "cbc") == 0))
+		cc->cr_dm.cri_alg  = CRYPTO_3DES_CBC;
+	else {
+		dmprintk("\ncrypt_ctr: using OCF: unknown cipher or bad chain mode\n");
+		ti->error = DM_MSG_PREFIX "using OCF: unknown cipher or bad chain mode";
+		goto bad;
+	}
+
+	//dmprintk("key size is %d\n",cc->key_size);
+	cc->cr_dm.cri_klen = cc->key_size*8;
+	cc->cr_dm.cri_key  = cc->key;
+	cc->cr_dm.cri_next = NULL;
+
+	if(crypto_newsession(&cc->ocf_cryptoid, &cc->cr_dm, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)){
+		dmprintk("crypt_ctr: crypto_newsession failed\n");
+		ti->error = DM_MSG_PREFIX "crypto_newsession failed";
+		goto bad;
+	}
+#else
 	/* Allocate cipher */
 	for_each_possible_cpu(cpu) {
 		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
@@ -1496,13 +1859,6 @@
 		}
 	}
 
-	/* Initialize and set key */
-	ret = crypt_set_key(cc, key);
-	if (ret < 0) {
-		ti->error = "Error decoding and setting key";
-		goto bad;
-	}
-
 	/* Initialize IV */
 	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
 	if (cc->iv_size)
@@ -1513,6 +1869,7 @@
 		DMWARN("Selected cipher does not support IVs");
 		ivmode = NULL;
 	}
+#endif/*CONFIG_OCF_DM_CRYPT*/
 
 	/* Choose ivmode, see comments at iv code. */
 	if (ivmode == NULL)
@@ -1523,8 +1880,10 @@
 		cc->iv_gen_ops = &crypt_iv_plain64_ops;
 	else if (strcmp(ivmode, "essiv") == 0)
 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
+#if !defined(CONFIG_OCF_DM_CRYPT)
 	else if (strcmp(ivmode, "benbi") == 0)
 		cc->iv_gen_ops = &crypt_iv_benbi_ops;
+#endif
 	else if (strcmp(ivmode, "null") == 0)
 		cc->iv_gen_ops = &crypt_iv_null_ops;
 	else if (strcmp(ivmode, "lmk") == 0) {
@@ -1541,6 +1900,17 @@
 		goto bad;
 	}
 
+#if defined(CONFIG_OCF_DM_CRYPT)
+	switch (cc->cr_dm.cri_alg) {
+		case CRYPTO_AES_CBC:
+			cc->iv_size = 16;
+			break;
+		default:
+			cc->iv_size = 8;
+			break;
+	}
+#endif
+
 	/* Allocate IV */
 	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
 		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
@@ -1550,6 +1920,7 @@
 		}
 	}
 
+#if !defined(CONFIG_OCF_DM_CRYPT)
 	/* Initialize IV (set keys for ESSIV etc) */
 	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
 		ret = cc->iv_gen_ops->init(cc);
@@ -1558,6 +1929,7 @@
 			goto bad;
 		}
 	}
+#endif
 
 	ret = 0;
 bad:
@@ -1613,10 +1985,12 @@
 	}
 
 	cc->dmreq_start = sizeof(struct ablkcipher_request);
+#if !defined(CONFIG_OCF_DM_CRYPT)
 	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
 	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
 	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
 			   ~(crypto_tfm_ctx_alignment() - 1);
+#endif
 
 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
 			sizeof(struct dm_crypt_request) + cc->iv_size);
@@ -1895,6 +2269,10 @@
 		kmem_cache_destroy(_crypt_io_pool);
 	}
 
+#ifdef CONFIG_OCF_DM_CRYPT
+	printk("dm_crypt using the OCF for crypto acceleration.\n");
+#endif
+
 	return r;
 }
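For orientation, the OCF flow the dm-crypt changes above rely on is: crypto_newsession() once in the constructor, one struct cryptop per 512-byte sector built with crypto_getreq() and handed to crypto_dispatch(), and crypto_freereq() in the completion callback. A condensed sketch of the per-request setup, using only the calls and fields visible in this patch (the helper and its signature are hypothetical):

/* Submit one OCF cipher request against an existing session 'sid'
 * (as stored in cc->ocf_cryptoid above). */
static int ocf_submit_one(uint64_t sid, int alg, caddr_t key, int klen_bits,
			  caddr_t in, caddr_t out, unsigned int len, int encrypt,
			  int (*done)(struct cryptop *), caddr_t priv)
{
	struct cryptop *crp = crypto_getreq(1);	/* one processing descriptor */
	struct cryptodesc *crd;

	if (!crp)
		return -ENOMEM;

	crd = crp->crp_desc;
	crd->crd_flags = encrypt ? CRD_F_ENCRYPT : 0;
	crd->crd_alg   = alg;			/* e.g. CRYPTO_AES_CBC */
	crd->crd_len   = len;
	crd->crd_klen  = klen_bits;
	crd->crd_key   = key;

	crp->crp_ilen     = len;
	crp->crp_flags    = CRYPTO_F_CBIMM | CRYPTO_F_BATCH;
	crp->crp_buf      = in;
	crp->crp_out_buf  = out;	/* crp_out_buf is added to struct cryptop by this patch */
	crp->crp_opaque   = priv;
	crp->crp_callback = done;	/* callback must free crp with crypto_freereq() */
	crp->crp_sid      = sid;

	return crypto_dispatch(crp);
}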
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 2d97bf0..09846f0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -571,12 +571,30 @@
 			best_good_sectors = sectors;
 
 		dist = abs(this_sector - conf->mirrors[disk].head_position);
+		/* Prefer an idle disk, but still choose the best one if more than one
+		 * disk is idle: we add an artificial, arbitrary (MaxSector/2) weight
+		 * to busy disks to favor idle disks. If the distance is greater than
+		 * MaxSector/2, we saturate it at the maximum value. We may not choose
+		 * the smaller distance if all disks fall into that situation, but
+		 * since all disks will then have long seek times, we don't really care.
+		 * We may end up choosing a busy disk if the idle disks have a very
+		 * high distance, but this may actually be preferable to minimize seek
+		 * times. If dist == 0, we choose that disk even if it is busy, again
+		 * to minimize seek times, as it is likely a sequential read with a single pending request.
+		 * TODO: use per-disk next_seq_sect to better detect sequential reads?
+		 */
+		if (dist == 0) {
+			best_disk = disk;
+			break;
+		}
+		if (atomic_read(&rdev->nr_pending) != 0) {
+			if (dist < MaxSector/2)
+				dist += MaxSector / 2;
+			else dist = MaxSector - 1;
+		}
 		if (choose_first
 		    /* Don't change to another disk for sequential reads */
-		    || conf->next_seq_sect == this_sector
-		    || dist == 0
-		    /* If device is idle, use it */
-		    || atomic_read(&rdev->nr_pending) == 0) {
+		    || conf->next_seq_sect == this_sector) {
 			best_disk = disk;
 			break;
 		}
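The read_balance change above effectively replaces the raw head distance with a weighted distance that penalises busy mirrors. A small sketch of that metric (hypothetical helper, not part of the patch):

/* Effective distance used above to pick a mirror: dist == 0 (likely a
 * sequential read) always wins; a disk with pending I/O gets an extra
 * MaxSector/2 penalty, saturated at MaxSector - 1, so idle disks are
 * preferred unless they are very far away. */
static sector_t effective_read_dist(sector_t dist, int nr_pending)
{
	if (dist == 0)
		return 0;

	if (nr_pending) {
		if (dist < MaxSector / 2)
			dist += MaxSector / 2;
		else
			dist = MaxSector - 1;
	}

	return dist;
}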
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 26ef63a..2316102 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -485,6 +485,28 @@
 static void
 raid5_end_write_request(struct bio *bi, int error);
 
+#ifdef CONFIG_RAID_ZERO_COPY
+static inline void r5dev_switch_page(struct r5dev *dev, struct page *page)
+{
+	BUG_ON(dev->page_save != NULL);
+	BUG_ON(dev->page != bio_iovec_idx(&dev->req, 0)->bv_page);
+	/* The pointer must be restored whenever the LOCKED gets cleared. */
+	dev->page_save = dev->page;
+	dev->page = bio_iovec_idx(&dev->req, 0)->bv_page = page;
+	kmap(dev->page); /* for sync_xor on 32-bit systems */
+}
+
+static inline void r5dev_restore_page(struct r5dev *dev)
+{
+	BUG_ON(dev->page_save == NULL);
+	BUG_ON(dev->page != bio_iovec_idx(&dev->req, 0)->bv_page);
+	BUG_ON(dev->page == dev->page_save);
+	kunmap(dev->page_save);
+	dev->page = bio_iovec_idx(&dev->req, 0)->bv_page = dev->page_save;
+	dev->page_save = NULL;
+}
+#endif
+
 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 {
 	struct r5conf *conf = sh->raid_conf;
@@ -585,6 +607,11 @@
 				set_bit(STRIPE_DEGRADED, &sh->state);
 			pr_debug("skip op %ld on disc %d for sector %llu\n",
 				bi->bi_rw, i, (unsigned long long)sh->sector);
+#ifdef CONFIG_RAID_ZERO_COPY
+                       if (test_bit(R5_DirectAccess, &sh->dev[i].flags)) {
+			       r5dev_restore_page(&sh->dev[i]);
+                       }
+#endif
 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
 			set_bit(STRIPE_HANDLE, &sh->state);
 		}
@@ -781,9 +808,23 @@
 		__func__, (unsigned long long)sh->sector, target);
 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 
+#if 0
 	for (i = disks; i--; )
 		if (i != target)
 			xor_srcs[count++] = sh->dev[i].page;
+#else
+       for (i = disks; i--; ) {
+               struct r5dev *dev = &sh->dev[i];
+               struct page *pg = dev->page;
+               if (i != target) {
+#ifdef CONFIG_RAID_ZERO_COPY
+                       if (test_bit(R5_DirectAccess, &dev->flags))
+                               pg = dev->req.bi_io_vec[0].bv_page;
+#endif
+                       xor_srcs[count++] = pg;
+               }
+       }
+#endif
 
 	atomic_inc(&sh->count);
 
@@ -1020,8 +1061,19 @@
 	for (i = disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
 		/* Only process blocks that are known to be uptodate */
+#if 0
 		if (test_bit(R5_Wantdrain, &dev->flags))
 			xor_srcs[count++] = dev->page;
+#else
+               if (test_bit(R5_Wantdrain, &dev->flags)) {
+                       struct page *pg = dev->page;
+#ifdef CONFIG_RAID_ZERO_COPY
+                       if (test_bit(R5_DirectAccess, &dev->flags))
+                               pg = dev->req.bi_io_vec[0].bv_page;
+#endif
+                       xor_srcs[count++] = pg;
+               }
+#endif
 	}
 
 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
@@ -1031,12 +1083,55 @@
 	return tx;
 }
 
+#ifdef CONFIG_RAID_ZERO_COPY
+static struct page *raid5_zero_copy(struct bio *bio, sector_t sector)
+{
+       sector_t bi_sector = bio->bi_sector;
+       struct page *page = NULL;
+       struct bio_vec *bv;
+       int i;
+
+       bio_for_each_segment(bv, bio, i) {
+               if (sector == bi_sector)
+                       page = bio_iovec_idx(bio, i)->bv_page;
+
+               bi_sector += bio_iovec_idx(bio, i)->bv_len >> 9;
+               if (bi_sector >= sector + STRIPE_SECTORS) {
+                       /* check if the stripe is covered by one page */
+                       if (page == bio_iovec_idx(bio, i)->bv_page) {
+                               SetPageConstant(page);
+                               return page;
+                       }
+                       return NULL;
+               }
+       }
+       return NULL;
+}
+#endif
+
+
+
 static struct dma_async_tx_descriptor *
 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 {
 	int disks = sh->disks;
 	int i;
 
+#ifdef CONFIG_RAID_ZERO_COPY
+/* Workaround for the data corruption that happens in the read-modify-write scenario */
+        int write_count = 0;
+        int do_zero_copy = 0;
+
+        for (i = disks; i--; )
+        {
+                struct r5dev *dev = &sh->dev[i];
+                if(dev->towrite)
+                        write_count++;
+        }
+        if(write_count == (disks-1))
+                do_zero_copy = 1;
+#endif
+
 	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
@@ -1052,10 +1147,30 @@
 			dev->towrite = NULL;
 			BUG_ON(dev->written);
 			wbi = dev->written = chosen;
-			spin_unlock_irq(&sh->raid_conf->device_lock);
+#ifdef CONFIG_RAID_ZERO_COPY
+			set_bit(R5_LOCKED, &dev->flags);
+			BUG_ON(test_bit(R5_DirectAccess, &dev->flags));
+			spin_unlock_irq(&sh->raid_conf->device_lock);
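+			/*
+			 * If the bio completely overwrites this stripe page
+			 * and the device is in sync, point the r5dev at the
+			 * bio's page instead of copying the data into the
+			 * stripe cache.
+			 */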
+			if (!wbi->bi_next && test_bit(R5_OVERWRITE, &dev->flags) &&
+			    test_bit(R5_Insync, &dev->flags) && do_zero_copy) {
+				struct page *pg = raid5_zero_copy(wbi,
+								  dev->sector);
+				if (pg) {
+					set_bit(R5_DirectAccess, &dev->flags);
+					r5dev_switch_page(dev, pg);
+					clear_bit(R5_UPTODATE, &dev->flags);
+					clear_bit(R5_OVERWRITE, &dev->flags);
+					continue;
+				}
+			}
 
-			while (wbi && wbi->bi_sector <
-				dev->sector + STRIPE_SECTORS) {
+			clear_bit(R5_OVERWRITE, &dev->flags);
+			set_bit(R5_UPTODATE, &dev->flags);
+#else
+			spin_unlock_irq(&sh->raid_conf->device_lock);
+#endif
+			while (wbi && wbi->bi_sector <
+				dev->sector + STRIPE_SECTORS) {
 				if (wbi->bi_rw & REQ_FUA)
 					set_bit(R5_WantFUA, &dev->flags);
 				tx = async_copy_data(1, wbi, dev->page,
@@ -1086,13 +1201,17 @@
 	for (i = disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
 
+#ifdef CONFIG_RAID_ZERO_COPY
+		if ((dev->written && !test_bit(R5_DirectAccess, &dev->flags)) ||
+		    i == pd_idx || i == qd_idx) {
+#else
 		if (dev->written || i == pd_idx || i == qd_idx) {
+#endif
 			set_bit(R5_UPTODATE, &dev->flags);
 			if (fua)
 				set_bit(R5_WantFUA, &dev->flags);
 		}
 	}
 
 	if (sh->reconstruct_state == reconstruct_state_drain_run)
 		sh->reconstruct_state = reconstruct_state_drain_result;
 	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
@@ -1129,15 +1248,39 @@
 		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
+#if 0
 			if (dev->written)
 				xor_srcs[count++] = dev->page;
+#else
+			struct page *pg = dev->page;
+
+			if (dev->written) {
+#ifdef CONFIG_RAID_ZERO_COPY
+				if (test_bit(R5_DirectAccess, &dev->flags))
+					pg = dev->req.bi_io_vec[0].bv_page;
+#endif
+				xor_srcs[count++] = pg;
+			}
+#endif
 		}
 	} else {
 		xor_dest = sh->dev[pd_idx].page;
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
+#if 0
 			if (i != pd_idx)
 				xor_srcs[count++] = dev->page;
+#else
+			struct page *pg = dev->page;
+
+			if (i != pd_idx) {
+#ifdef CONFIG_RAID_ZERO_COPY
+				if (test_bit(R5_DirectAccess, &dev->flags))
+					pg = dev->req.bi_io_vec[0].bv_page;
+#endif
+				xor_srcs[count++] = pg;
+			}
+#endif
 		}
 	}
 
@@ -1700,6 +1843,12 @@
 		set_bit(R5_MadeGood, &sh->dev[i].flags);
 
 	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
+
+#ifdef CONFIG_RAID_ZERO_COPY
+	if (test_bit(R5_DirectAccess, &sh->dev[i].flags))
+		r5dev_restore_page(&sh->dev[i]);
+#endif
 	
 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
 	set_bit(STRIPE_HANDLE, &sh->state);
@@ -2520,7 +2669,11 @@
 		if (sh->dev[i].written) {
 			dev = &sh->dev[i];
 			if (!test_bit(R5_LOCKED, &dev->flags) &&
-				test_bit(R5_UPTODATE, &dev->flags)) {
+				(test_bit(R5_UPTODATE, &dev->flags)
+#ifdef CONFIG_RAID_ZERO_COPY
+				 || test_bit(R5_DirectAccess, &dev->flags)
+#endif
+				)) {
 				/* We can return any write requests */
 				struct bio *wbi, *wbi2;
 				int bitmap_end = 0;
@@ -2528,6 +2681,9 @@
 				spin_lock_irq(&conf->device_lock);
 				wbi = dev->written;
 				dev->written = NULL;
+#ifdef CONFIG_RAID_ZERO_COPY
+				clear_bit(R5_DirectAccess, &dev->flags);
+#endif
 				while (wbi && wbi->bi_sector <
 					dev->sector + STRIPE_SECTORS) {
 					wbi2 = r5_next_bio(wbi, dev->sector);
@@ -3227,7 +3383,11 @@
 		/* All the 'written' buffers and the parity block are ready to
 		 * be written back to disk
 		 */
+#ifdef CONFIG_RAID_ZERO_COPY
+		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
+		       !test_bit(R5_DirectAccess, &sh->dev[sh->pd_idx].flags));
+#else
 		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
+#endif
 		BUG_ON(sh->qd_idx >= 0 &&
 		       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
 		for (i = disks; i--; ) {
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index e10c553..9a1c2c4 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -208,6 +208,7 @@
 	atomic_t		count;	      /* nr of active thread/requests */
 	int			bm_seq;	/* sequence number for bitmap flushes */
 	int			disks;		/* disks in stripe */
+	int			zero_copy_count;
 	enum check_states	check_state;
 	enum reconstruct_states reconstruct_state;
 	/**
@@ -228,10 +229,11 @@
 	struct r5dev {
 		struct bio	req;
 		struct bio_vec	vec;
-		struct page	*page;
+		struct page	*page, *page_save;
 		struct bio	*toread, *read, *towrite, *written;
 		sector_t	sector;			/* sector of this page */
 		unsigned long	flags;
+		dma_addr_t	pg_dma;
 	} dev[1]; /* allocated with extra space depending of RAID geometry */
 };
 
@@ -275,6 +277,14 @@
 #define	R5_WantFUA	14	/* Write should be FUA */
 #define	R5_WriteError	15	/* got a write error - need to record it */
 #define	R5_MadeGood	16	/* A bad block has been fixed by writing to it*/
+
+#ifdef CONFIG_RAID_ZERO_COPY
+#define	R5_DirectAccess	17	/* access cached pages directly instead of
+					 * sh pages */
+#define	R5_DirectAccessLock	18
+#endif
+
 /*
  * Write method
  */
@@ -306,6 +316,7 @@
 	STRIPE_BIOFILL_RUN,
 	STRIPE_COMPUTE_RUN,
 	STRIPE_OPS_REQ_PENDING,
+	STRIPE_ZERO_COPY_OPS,
 };
 
 /*
diff --git a/drivers/mtd/nand/comcerto_nand.c b/drivers/mtd/nand/comcerto_nand.c
index 706471f..3f19fa8 100644
--- a/drivers/mtd/nand/comcerto_nand.c
+++ b/drivers/mtd/nand/comcerto_nand.c
@@ -135,9 +135,9 @@
 static struct nand_bbt_descr bbt_main_descr = {
 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
 		| NAND_BBT_8BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs = 180,
+	.offs = 44,
 	.len = 4,
-	.veroffs = 184,
+	.veroffs = 48,
 	.maxblocks = 8,
 	.pattern = bbt_pattern,
 };
@@ -145,9 +145,9 @@
 static struct nand_bbt_descr bbt_mirror_descr = {
 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
 		| NAND_BBT_8BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs = 180,
+	.offs = 44,
 	.len = 4,
-	.veroffs = 184,
+	.veroffs = 48,
 	.maxblocks = 8,
 	.pattern = mirror_pattern,
 };
@@ -198,12 +198,12 @@
 
 #if defined (CONFIG_NAND_COMCERTO_ECC_8_HW_BCH) || defined (CONFIG_NAND_COMCERTO_ECC_24_HW_BCH)
 	ecc_gen_cfg_val &= BCH_MODE;
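+	/* The ECC correction level setting only applies in BCH mode */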
+	ecc_gen_cfg_val = (ecc_gen_cfg_val & ~(ECC_LVL_MASK)) | (ECC_LVL_VAL << ECC_LVL_SHIFT);
 #else
 	ecc_gen_cfg_val |= HAMM_MODE;
 #endif
 
-	ecc_gen_cfg_val = (ecc_gen_cfg_val & ~(ECC_LVL_MASK)) | (ECC_LVL_VAL << ECC_LVL_SHIFT);
-	ecc_gen_cfg_val = (ecc_gen_cfg_val & ~(BLK_SIZE_MASK)) | nand_device->ecc.size; ;
+	ecc_gen_cfg_val = (ecc_gen_cfg_val & ~(BLK_SIZE_MASK)) | nand_device->ecc.size;
 
 	writel_relaxed(ecc_gen_cfg_val, ecc_base_addr + ECC_GEN_CFG);
 	/* Reset parity module and latch configured values */
@@ -292,13 +292,6 @@
 
 	udelay(25);
 
-	/* Check if the block has uncorrectable number of errors */
-	if ((readl_relaxed(ecc_base_addr + ECC_CORR_STAT)) & ECC_UNCORR) {
-		printk_ratelimited(KERN_WARNING "ECC: uncorrectable error  2 !!!\n");
-		temp_nand_ecc_errors[1] += 1 ;
-		return -EIO;
-	}
-
 	err_corr_data_prev = 0;
 	/* Read Correction data status register till header is 0x7FD */
 	do {
@@ -331,10 +324,16 @@
 
 	if (!((readl_relaxed(ecc_base_addr + ECC_CORR_DONE_STAT)) & ECC_DONE)) {
 		temp_nand_ecc_errors[0] += 1 ;
-		printk(KERN_WARNING "ECC: uncorrectable error 1 !!!\n");
+		printk_ratelimited(KERN_WARNING "ECC: uncorrectable error 1 !!!\n");
 		return -1;
 	}
 
+	/* Check if the block has uncorrectable number of errors */
+	if ((readl_relaxed(ecc_base_addr + ECC_CORR_STAT)) & ECC_UNCORR) {
+		printk_ratelimited(KERN_WARNING "ECC: uncorrectable error  2 !!!\n");
+		temp_nand_ecc_errors[1] += 1 ;
+		return -EIO;
+	}
 
 	temp_nand_ecc_errors[3] += err_count;
 
@@ -659,6 +658,7 @@
 	nand_device->bbt_md = &bbt_mirror_descr;
 #endif
 	nand_device->badblock_pattern = &c2000_badblock_pattern;
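+	/* Store the bad block table in flash */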
+	nand_device->bbt_options |= NAND_BBT_USE_FLASH;
 
 	} else {
 		printk("using soft ecc.\n");
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 83a5a5a..0a728ad 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -35,6 +35,11 @@
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 
+#ifdef CONFIG_ARCH_M86XXX
+#include <mach/comcerto-2000/pm.h>
+#endif
+
+
 MODULE_DESCRIPTION("PHY library");
 MODULE_AUTHOR("Andy Fleming");
 MODULE_LICENSE("GPL");
@@ -885,7 +890,17 @@
 int genphy_suspend(struct phy_device *phydev)
 {
 	int value;
-
+#ifdef CONFIG_ARCH_M86XXX
+	/*
+	 * If WOL_IRQ is not set in the shared PMU bitmask, this PHY is the
+	 * Wake-on-LAN wake source: leave it powered and skip the suspend.
+	 */
+	if (!(host_utilpe_shared_pmu_bitmask & WOL_IRQ))
+		return 0;
+#endif
 	mutex_lock(&phydev->lock);
 
 	value = phy_read(phydev, MII_BMCR);
@@ -900,7 +915,17 @@
 int genphy_resume(struct phy_device *phydev)
 {
 	int value;
-
+#ifdef CONFIG_ARCH_M86XXX
+	/*
+	 * The PHY was left powered for Wake-on-LAN (WOL_IRQ not set in the
+	 * shared PMU bitmask), so there is nothing to resume here.
+	 */
+	if (!(host_utilpe_shared_pmu_bitmask & WOL_IRQ))
+		return 0;
+#endif
 	mutex_lock(&phydev->lock);
 
 	value = phy_read(phydev, MII_BMCR);
diff --git a/drivers/spi/designware_spi.c b/drivers/spi/designware_spi.c
index 19ddfc5..4211ede 100644
--- a/drivers/spi/designware_spi.c
+++ b/drivers/spi/designware_spi.c
@@ -180,8 +180,8 @@
 	struct designware_spi *dwspi = spi_master_get_devdata(spi->master);
 	u16 ctrlr0 = readw(dwspi->regs + DWSPI_CTRLR0);
 
-	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
-	hz = (t) ? t->speed_hz : spi->max_speed_hz;
+	bits_per_word = (t && t->bits_per_word) ?
+			t->bits_per_word : spi->bits_per_word;
+	hz = (t && t->speed_hz) ? t->speed_hz : spi->max_speed_hz;
 
 	if (bits_per_word < 4 || bits_per_word > 16) {
 		dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
@@ -272,8 +272,16 @@
 			writew(dr, dwspi->regs + DWSPI_DR);
 			dwspi->remaining_tx_bytes -= 2;
 		}
-		if (--tx_limit <= 0)
+
+		if (dwspi->bits_per_word <= 8)
+			--tx_limit;
+		else
+			tx_limit -= 2;
+
+		if (tx_limit <= 0)
 			break;
+
 		sr = readb(dwspi->regs + DWSPI_SR);
 	}
 
@@ -296,15 +304,6 @@
 			dwspi->remaining_tx_bytes = dwspi->tx_t->len;
 			dwspi->status = 0;
 
-			/* can't change speed or bits in the middle of a
-			 * message. must disable the controller for this.
-			 */
-			if (dwspi->tx_t->speed_hz
-					|| dwspi->tx_t->bits_per_word) {
-				dwspi->status = -ENOPROTOOPT;
-				break;
-			}
-
 			if (!dwspi->tx_t->tx_buf && !dwspi->tx_t->rx_buf
 					&& dwspi->tx_t->len) {
 				dwspi->status = -EINVAL;
@@ -329,6 +328,7 @@
 	}
 
 	complete(&dwspi->done);
+
 	return 1;
 }
 
@@ -518,7 +518,7 @@
 			dwspi->remaining_tx_bytes =
 				dwspi->remaining_rx_bytes = 0;
 			dwspi->tx_count = dwspi->rx_count = 0;
-			designware_spi_setup_transfer(m->spi, NULL);
+			designware_spi_setup_transfer(m->spi, dwspi->tx_t);
 			dwspi_enable(dwspi, 1);
 			designware_spi_do_transfers(dwspi);
 			dwspi_enable(dwspi, 0);
@@ -530,7 +530,6 @@
 
 		m->status = dwspi->status;
 		m->complete(m->context);
-
 		spin_lock(&dwspi->qlock);
 	}
 	spin_unlock(&dwspi->qlock);
diff --git a/drivers/spi2/busses/comcerto_spi.c b/drivers/spi2/busses/comcerto_spi.c
index 017cd6d..d8e2d6f 100644
--- a/drivers/spi2/busses/comcerto_spi.c
+++ b/drivers/spi2/busses/comcerto_spi.c
@@ -222,13 +222,14 @@
  *
  *
  */
-static int do_write_only_transfer16(struct comcerto_spi *spi, u16 *buf, unsigned int *len)
+static int do_write_only_transfer16(struct comcerto_spi *spi, u16 *buf, unsigned int *len, u32 ser_reg)
 {
 	unsigned int len_now;
 	int rc = 0;
 	unsigned int tmp = *len;
 	u32 dr = spi->membase + COMCERTO_SPI_DR;
 	u32 txflr = spi->membase + COMCERTO_SPI_TXFLR;
+	int ser_done = 0;
 
 //      printk(KERN_INFO "do_write_only_transfer(%#lx, %#lx, %d)\n", (unsigned long)spi, (unsigned long)buf, *len);
 
@@ -241,6 +242,12 @@
 
 		while (len_now--)
 			__raw_writew(cpu_to_le16(*buf++), dr);
+
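+		/* Program the slave-enable register once, after the first FIFO fill */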
+		if (!ser_done) {
+			__raw_writel(ser_reg, spi->membase + COMCERTO_SPI_SER);
+			ser_done = 1;
+		}
 	}
 
 	*len -= tmp;
@@ -370,7 +377,7 @@
 		if (transfer->fs <= 8)
 			rc = do_write_only_transfer8(spi, transfer->wbuf, &transfer->wlen, ser);
 		else
-			rc = do_write_only_transfer16(spi, (u16 *) transfer->wbuf, &transfer->wlen);
+			rc = do_write_only_transfer16(spi, (u16 *) transfer->wbuf, &transfer->wlen, ser);
 
 		break;
 
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index d72bca7..ad90014 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -426,10 +426,36 @@
 	return readb(p->membase + offset);
 }
 
+#ifdef CONFIG_ARCH_M86XXX
+void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p);
+
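+/*
+ * The DesignWare UART can ignore LCR writes while it is busy; clearing
+ * and reinitialising the FIFOs and draining the receiver forces it idle
+ * so the write can be retried (see mem_serial_out()).
+ */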
+void dw8250_force_idle(struct uart_port *p)
+{
+	struct uart_8250_port *up = container_of(p, struct uart_8250_port, port);
+
+	serial8250_clear_and_reinit_fifos(up);
+	(void)p->serial_in(p, UART_RX);
+}
+#endif
+
 static void mem_serial_out(struct uart_port *p, int offset, int value)
 {
-	offset = map_8250_out_reg(p, offset) << p->regshift;
-	writeb(value, p->membase + offset);
+	int offset1 = map_8250_out_reg(p, offset) << p->regshift;
+	writeb(value, p->membase + offset1);
+
+#ifdef CONFIG_ARCH_M86XXX
+	/* Make sure LCR write wasn't ignored */
+	if (offset == UART_LCR) {
+		int tries = 1000;
+		while (tries--) {
+			if (value == p->serial_in(p, UART_LCR))
+				return;
+			dw8250_force_idle(p);
+			writeb(value, p->membase + offset1);
+		}
+		dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+	}
+#endif
 }
 
 static void mem32_serial_out(struct uart_port *p, int offset, int value)
@@ -622,6 +648,17 @@
 	}
 }
 
+#ifdef CONFIG_ARCH_M86XXX
+void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
+{
+	unsigned char fcr;
+
+	serial8250_clear_fifos(p);
+	fcr = uart_config[p->port.type].fcr;
+	serial_outp(p, UART_FCR, fcr);
+}
+#endif
+
 /*
  * IER sleep support.  UARTs which have EFRs need the "extended
  * capability" bit enabled.  Note that on XR16C850s, we need to
@@ -2348,11 +2385,17 @@
 	 * have sufficient FIFO entries for the latency of the remote
 	 * UART to respond.  IOW, at least 32 bytes of FIFO.
 	 */
+#ifdef CONFIG_ARCH_M86XXX
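+	/* Honour CRTSCTS directly instead of gating auto flow control
+	 * on UART_CAP_AFE and the FIFO size. */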
+	up->mcr &= ~UART_MCR_AFE;
+	if (termios->c_cflag & CRTSCTS)
+		up->mcr |= UART_MCR_AFE;
+#else
 	if (up->capabilities & UART_CAP_AFE && up->port.fifosize >= 32) {
 		up->mcr &= ~UART_MCR_AFE;
 		if (termios->c_cflag & CRTSCTS)
 			up->mcr |= UART_MCR_AFE;
 	}
+#endif
 
 	/*
 	 * Ok, we're now changing the port state.  Do it with
diff --git a/drivers/usb/dwc_otg/dwc_otg_cil.c b/drivers/usb/dwc_otg/dwc_otg_cil.c
index c10917b..84f1179 100644
--- a/drivers/usb/dwc_otg/dwc_otg_cil.c
+++ b/drivers/usb/dwc_otg/dwc_otg_cil.c
@@ -1381,7 +1381,7 @@
 		DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n");
 		/* Old value was DWC_GAHBCFG_INT_DMA_BURST_INCR - done for
 		  Host mode ISOC in issue fix - vahrama */
-		ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR4;
+		ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR16;
 		core_if->dma_enable = (core_if->core_params->dma_enable != 0);
 		core_if->dma_desc_enable =
 		    (core_if->core_params->dma_desc_enable != 0);
diff --git a/drivers/usb/dwc_otg/dwc_otg_cil_intr.c b/drivers/usb/dwc_otg/dwc_otg_cil_intr.c
index edc5c92..0581b49 100644
--- a/drivers/usb/dwc_otg/dwc_otg_cil_intr.c
+++ b/drivers/usb/dwc_otg/dwc_otg_cil_intr.c
@@ -336,7 +336,8 @@
 		    " ++Connector ID Status Change Interrupt++  (%s)\n",
 		    (dwc_otg_is_host_mode(core_if) ? "Host" : "Device"));
 	
-	DWC_SPINUNLOCK(core_if->lock);
+	if (core_if->lock)
+		DWC_SPINUNLOCK(core_if->lock);
 
 	/*
 	 * Need to schedule a work, as there are possible DELAY function calls
@@ -345,7 +346,8 @@
 
 	DWC_WORKQ_SCHEDULE(core_if->wq_otg, w_conn_id_status_change,
 			   core_if, "connection id status change");
-	DWC_SPINLOCK(core_if->lock);
+	if (core_if->lock)
+		DWC_SPINLOCK(core_if->lock);
 
 	/* Set flag and clear interrupt */
 	gintsts.b.conidstschng = 1;
diff --git a/drivers/usb/dwc_otg/dwc_otg_driver.c b/drivers/usb/dwc_otg/dwc_otg_driver.c
index 717da0b..2afd1bc 100644
--- a/drivers/usb/dwc_otg/dwc_otg_driver.c
+++ b/drivers/usb/dwc_otg/dwc_otg_driver.c
@@ -60,6 +60,7 @@
 #include <mach/hardware.h>
 
 #include <linux/clk.h>
+#include <mach/comcerto-2000/pm.h>
 
 /* USB 3.0 clock */
 static struct clk *usb2_clk;
@@ -1152,6 +1153,19 @@
 	int error_status = 0, val = 0;
 	struct usb_hcd *hcd = NULL;
 
+	/*
+	 * If USB2p0_IRQ is not set in the shared PMU bitmask, the USB 2.0
+	 * controller is kept as a wake source: skip the suspend so it can
+	 * wake the system.
+	 */
+	if (!(host_utilpe_shared_pmu_bitmask & USB2p0_IRQ))
+		return error_status;
+
 	hcd = (struct usb_hcd *) platform_get_drvdata(pd);
 
 	/* Do the port suspend for USB 2.0 Controller */
@@ -1163,6 +1177,10 @@
 	/* Disable the Clock */
 	clk_disable(usb2_clk);
 
+	/* PM Performance Enhancement : USB0 PD */
+	/* Common Block Power-Down Control and powering down all analog blocks */
+	writel(0x01220040, COMCERTO_USB0_PHY_CTRL_REG0);
+
 	return error_status;
 }
 
@@ -1176,6 +1194,24 @@
 	int error_status = 0;
 	struct usb_hcd *hcd = NULL;
 
+
+	/*
+	 * Skip the resume sequence when USB2p0_IRQ is set in the shared
+	 * PMU bitmask.
+	 */
+	if (host_utilpe_shared_pmu_bitmask & USB2p0_IRQ)
+		return error_status;
+
+	/* PM Performance Enhancement : USB0 PD */
+	/* Common Block Power-Down Control and powering down all analog blocks */
+	writel(0x00220000, COMCERTO_USB0_PHY_CTRL_REG0);
+
 	/* Enable the Clock */
 	if (clk_enable(usb2_clk)){
 		pr_err("comcerto_usb2_bus_resume_dummy:Unable to enable the usb2 clock \n");
diff --git a/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c b/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c
index d145fa0..9fb3602 100644
--- a/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c
+++ b/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c
@@ -786,6 +786,7 @@
 {
 	dwc_otg_transaction_type_e tr_type;
 	int free_qtd;
+	gintmsk_data_t intr_mask = {.d32 = 0 };
 
 	DWC_DEBUGPL(DBG_HCDV, "  %s: channel %d, halt_status %d\n",
 		    __func__, hc->hc_num, halt_status);
@@ -861,10 +862,13 @@
 	}
 
 	/* Try to queue more transfers now that there's a free channel. */
-	tr_type = dwc_otg_hcd_select_transactions(hcd);
-	if (tr_type != DWC_OTG_TRANSACTION_NONE) {
-		dwc_otg_hcd_queue_transactions(hcd, tr_type);
-	}
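+	/*
+	 * If the SOF interrupt is unmasked, leave the queuing of new
+	 * transactions to the SOF handler; otherwise queue them now that
+	 * a host channel has been freed.
+	 */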
+	intr_mask.d32 = DWC_READ_REG32(&hcd->core_if->core_global_regs->gintmsk);
+	if (!intr_mask.b.sofintr) {
+		tr_type = dwc_otg_hcd_select_transactions(hcd);
+		if (tr_type != DWC_OTG_TRANSACTION_NONE) {
+			dwc_otg_hcd_queue_transactions(hcd, tr_type);
+		}
+	}
 }
 
 /**
@@ -1975,6 +1979,8 @@
 				     DWC_READ_REG32(&hcd->
 						    core_if->core_global_regs->
 						    gintsts));
+				halt_channel(hcd, hc, qtd,
+					     DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE);
 			}
 
 		}
diff --git a/drivers/usb/dwc_otg/dwc_otg_hcd_linux.c b/drivers/usb/dwc_otg/dwc_otg_hcd_linux.c
index 639321c..5c09aff 100644
--- a/drivers/usb/dwc_otg/dwc_otg_hcd_linux.c
+++ b/drivers/usb/dwc_otg/dwc_otg_hcd_linux.c
@@ -102,6 +102,19 @@
 
 /** @} */
 
+int comcerto_dwc_dummy_bus_suspend(struct usb_hcd *hcd)
+{
+	printk("\n comcerto_dwc_dummy_bus_suspend...");
+	return 0;
+}
+
+int comcerto_dwc_dummy_bus_resume(struct usb_hcd *hcd)
+{
+	printk("\n comcerto_dwc_dummy_bus_resume...");
+	return 0;
+}
+
+
 static struct hc_driver dwc_otg_hc_driver = {
 
 	.description = dwc_otg_hcd_name,
@@ -128,8 +141,8 @@
 
 	.hub_status_data = hub_status_data,
 	.hub_control = hub_control,
-	//.bus_suspend =
-	//.bus_resume =
+	.bus_suspend = comcerto_dwc_dummy_bus_suspend,
+	.bus_resume = comcerto_dwc_dummy_bus_resume,
 };
 
 /** Gets the dwc_otg_hcd from a struct usb_hcd */
diff --git a/drivers/usb/host/xhci-comcerto2000.c b/drivers/usb/host/xhci-comcerto2000.c
index ec765f0..de92ccc 100644
--- a/drivers/usb/host/xhci-comcerto2000.c
+++ b/drivers/usb/host/xhci-comcerto2000.c
@@ -138,8 +138,11 @@
 
         printk(KERN_INFO "### %s\n", __func__);
 
+#if defined(CONFIG_C2K_MFCN_EVM)
+	printk("%s: Resetting usb3...\n", __func__);
+	GPIO_reset_external_device(COMPONENT_USB_HUB, 0);
+#endif
 		/* Enable the USB 3.0 controller clock */
-
 		/* Get the usb3 clock structure  */
 		usb3_clk = clk_get(NULL,"usb1");
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 97fbe93..a6a9129 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -30,6 +30,16 @@
 #include <linux/statfs.h>
 #include <linux/compat.h>
 #include <linux/slab.h>
+
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
+#include <linux/socket.h>
+#include <net/sock.h>
+#include <linux/net.h>
+#if defined(CONFIG_COMCERTO_SPLICE_USE_MDMA)
+#include <mach/hardware.h>
+#endif
+#endif
+
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -1718,6 +1728,173 @@
 	return ret;
 }
 
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
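+/*
+ * Receive up to @count bytes from @sock straight into the page cache of
+ * @file at *@ppos, bypassing the pipe buffering of the normal splice path.
+ */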
+static ssize_t btrfs_splice_from_socket(struct file *file, struct socket *sock,
+					loff_t __user *ppos, size_t count)
+{
+	struct inode *inode = fdentry(file)->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct page **pages = NULL;
+	struct kvec *iov = NULL;
+	struct msghdr msg;
+	long recvtimeo;
+	ssize_t copied = 0;
+	size_t offset, offset_tmp;
+	int num_pages, dirty_pages;
+	int err = 0;
+	loff_t start_pos;
+	loff_t pos = file->f_pos;
+	int i;
+	unsigned count_tmp = count;
+
+#define ERROR_OUT do { mutex_unlock(&inode->i_mutex); goto out; } while (0)
+
+	if (!count)
+		return 0;
+
+	if (ppos && copy_from_user(&pos, ppos, sizeof pos))
+		return -EFAULT;
+	offset = pos & (PAGE_CACHE_SIZE - 1);
+	num_pages = (offset + count + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+#if defined(CONFIG_COMCERTO_SPLICE_USE_MDMA)
+	if (num_pages > MDMA_OUTBOUND_BUF_DESC)
+		goto out;	/* i_mutex is not held yet, so don't use ERROR_OUT */
+#endif
+	start_pos = round_down(pos, root->sectorsize);
+
+	if (!(pages = kmalloc(num_pages * sizeof(struct page *), GFP_KERNEL)) ||
+		!(iov = kmalloc(num_pages * sizeof(*iov), GFP_KERNEL))) {
+		err = -ENOMEM;
+		goto out;	/* i_mutex is not held yet */
+	}
+
+	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+	current->backing_dev_info = inode->i_mapping->backing_dev_info;
+
+	mutex_lock(&inode->i_mutex);
+
+	if ((err = generic_write_checks(file, &pos, &count,
+					S_ISBLK(inode->i_mode))))
+		ERROR_OUT;
+
+	if ((err = file_remove_suid(file)))
+		ERROR_OUT;
+
+	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+		err = -EROFS;
+		ERROR_OUT;
+	}
+
+	if ((err = btrfs_update_time(file)))
+		ERROR_OUT;
+
+	BTRFS_I(inode)->sequence++;
+	if (start_pos > i_size_read(inode) &&
+		(err = btrfs_cont_expand(inode, i_size_read(inode), start_pos)))
+		ERROR_OUT;
+
+	if ((err = btrfs_delalloc_reserve_space(inode,
+					num_pages << PAGE_CACHE_SHIFT)))
+		goto out_free;
+
+	if ((err = prepare_pages(root, file, pages, num_pages,
+					pos, pos >> PAGE_CACHE_SHIFT,
+					count, false))) {
+		btrfs_delalloc_release_space(inode,
+					num_pages << PAGE_CACHE_SHIFT);
+		goto out_free;
+	}
+
+	for (i = 0, offset_tmp = offset; i < num_pages; i++) {
+		unsigned bytes = PAGE_CACHE_SIZE - offset_tmp;
+
+		if (bytes > count_tmp)
+			bytes = count_tmp;
+		iov[i].iov_base = kmap(pages[i]) + offset_tmp;
+		iov[i].iov_len = bytes;
+		offset_tmp = 0;
+		count_tmp -= bytes;
+	}
+
+	/* The iovec array is ready; receive the data from the socket now */
+	msg.msg_name = NULL;
+	msg.msg_namelen = 0;
+	msg.msg_iov = (struct iovec *)&iov[0];
+	msg.msg_iovlen = num_pages;
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+	msg.msg_flags = MSG_KERNSPACE;
+
+	recvtimeo = sock->sk->sk_rcvtimeo;
+	sock->sk->sk_rcvtimeo = 8 * HZ;
+	copied = kernel_recvmsg(sock, &msg, iov, num_pages, count,
+                             MSG_WAITALL | MSG_NOCATCHSIG);
+	sock->sk->sk_rcvtimeo = recvtimeo;
+
+	if (copied < 0) {
+		err = copied;
+		copied = 0;
+	}
+
+	/* FIXME:
+	 * Unless offset == 0, this yields at least one dirty page even when
+	 * copied == 0; without it the first page gets corrupted for a reason
+	 * that is not yet understood.
+	 */
+	dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >>
+					PAGE_CACHE_SHIFT;
+
+	for (i = 0; i < num_pages; i++)
+		kunmap(pages[i]);
+	if (dirty_pages < num_pages) {
+		if (1 || dirty_pages) {
+			spin_lock(&BTRFS_I(inode)->lock);
+			BTRFS_I(inode)->outstanding_extents++;
+			spin_unlock(&BTRFS_I(inode)->lock);
+		}
+		btrfs_delalloc_release_space(inode,
+					(num_pages - dirty_pages) <<
+					PAGE_CACHE_SHIFT);
+	}
+
+	if (dirty_pages) {
+		if ((err = btrfs_dirty_pages(root, inode, pages,
+					dirty_pages, pos, copied, NULL))) {
+			btrfs_delalloc_release_space(inode,
+					dirty_pages << PAGE_CACHE_SHIFT);
+			btrfs_drop_pages(pages, num_pages);
+			goto out_free;
+		}
+	}
+
+	btrfs_drop_pages(pages, num_pages);
+	cond_resched();
+
+	balance_dirty_pages_ratelimited_nr(inode->i_mapping, dirty_pages);
+	if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
+		btrfs_btree_balance_dirty(root, 1);
+
+	pos += copied;
+
+out_free:
+	mutex_unlock(&inode->i_mutex);
+
+	if (copied > 0) {
+		file->f_pos = pos;
+		if (ppos && copy_to_user(ppos, &pos, sizeof *ppos))
+			err = -EFAULT;
+	}
+
+	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
+	if (copied > 0 || err == -EIOCBQUEUED)
+		err = generic_write_sync(file, pos, copied);
+out:
+	kfree(iov);
+	kfree(pages);
+	current->backing_dev_info = NULL;
+
+	return err ? err : copied;
+}
+#endif
+
 static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -1879,6 +2056,9 @@
 	.write		= do_sync_write,
 	.aio_read       = generic_file_aio_read,
 	.splice_read	= generic_file_splice_read,
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
+	.splice_from_socket	= btrfs_splice_from_socket,
+#endif
 	.aio_write	= btrfs_file_aio_write,
 	.mmap		= btrfs_file_mmap,
 	.open		= generic_file_open,
diff --git a/fs/splice.c b/fs/splice.c
index f8899bd..3b0d632 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -34,6 +34,10 @@
 #include <linux/socket.h>
 #include <linux/time.h>
 
+#include <net/sock.h>
+#include <linux/net.h>
+#include <linux/genalloc.h>
+
 /*
  * Attempt to steal a page from a pipe buffer. This should perhaps go into
  * a vm helper function, it's already simplified quite a bit by the
@@ -864,7 +868,8 @@
 	struct pipe_buffer *buf;
 	const struct pipe_buf_operations *ops;
 	int ret, ret2 = 0, remaining;
-	unsigned int curbuf, nrbufs, len, nrbufs_len, pos, offset, done;
+	unsigned int curbuf, nrbufs, len, nrbufs_len, done;
+	loff_t pos, offset;
 	struct file *file = sd->u.file;
 	struct address_space *mapping = file->f_mapping;
 	struct page **page;
@@ -916,7 +921,7 @@
 
 #if defined(CONFIG_COMCERTO_SPLICE_USE_MDMA)
 		// Is there a risk of getting the same page more than once (several buffers in a single page)?
-		ret = comcerto_dma_sg_add_input(sg, buf->page, buf->offset, buf->len, 0);
+		ret = comcerto_dma_sg_add_input(sg, page_address(buf->page) + buf->offset, buf->len, 0);
 		if (unlikely(ret)) {
 			printk(KERN_WARNING "%s: out of input bdescs\n", __func__);
 			break; //We will transfer what we could up to the previous buffer, based on nrbufs_len
@@ -967,7 +972,7 @@
 		goto err;		// We failed early, so we still have an easy way out
 
 #if defined(CONFIG_COMCERTO_SPLICE_USE_MDMA)
-	comcerto_dma_sg_add_output(sg, *page, offset, len, 1); //Don't check result since we should have at least one entry at this point
+	comcerto_dma_sg_add_output(sg, page_address(*page) + offset, len, 1); //Don't check result since we should have at least one entry at this point
 #endif
 
 	pos += len;
@@ -983,7 +988,7 @@
 			goto write_begin_done;
 
 #if defined(CONFIG_COMCERTO_SPLICE_USE_MDMA)
-		ret = comcerto_dma_sg_add_output(sg, *page, 0, PAGE_CACHE_SIZE, 1);
+		ret = comcerto_dma_sg_add_output(sg, page_address(*page), PAGE_CACHE_SIZE, 1);
 		if (unlikely(ret)) {
 			pagecache_write_end(file, mapping, pos, PAGE_CACHE_SIZE, 0, *page, *fsdata);
 			goto write_begin_done;
@@ -1003,7 +1008,7 @@
 			goto write_begin_done;
 
 #if defined(CONFIG_COMCERTO_SPLICE_USE_MDMA)
-		ret = comcerto_dma_sg_add_output(sg, *page, 0, remaining, 1);
+		ret = comcerto_dma_sg_add_output(sg, page_address(*page), remaining, 1);
 		if (unlikely(ret)) {
 			pagecache_write_end(file, mapping, pos, remaining, 0, *page, *fsdata);
 			goto write_begin_done;
@@ -2177,30 +2182,39 @@
 		int, fd_out, loff_t __user *, off_out,
 		size_t, len, unsigned int, flags)
 {
-	long error;
-	struct file *in, *out;
+	int error = -EBADF;
+	struct file *in, *out = NULL;
 	int fput_in, fput_out;
+	struct socket *sock = NULL;
 
 	if (unlikely(!len))
 		return 0;
 
-	error = -EBADF;
-	in = fget_light(fd_in, &fput_in);
-	if (in) {
-		if (in->f_mode & FMODE_READ) {
-			out = fget_light(fd_out, &fput_out);
-			if (out) {
-				if (out->f_mode & FMODE_WRITE)
-					error = do_splice(in, off_in,
-							  out, off_out,
-							  len, flags);
-				fput_light(out, fput_out);
-			}
-		}
+	if (!(out = fget_light(fd_out, &fput_out)))
+		return -EBADF;
 
+	if (!(out->f_mode & FMODE_WRITE))
+		goto out;
+
+	/* If fd_in is a socket and fd_out is not a pipe, try the file's
+	 * splice_from_socket() fast path. */
+	if (!get_pipe_info(out) &&
+		(sock = sockfd_lookup(fd_in, &error))) {
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
+		if (sock->sk && out->f_op->splice_from_socket)
+			error = out->f_op->splice_from_socket(out, sock,
+								off_out, len);
+#endif
+		fput(sock->file);
+	} else {
+		if (!(in = fget_light(fd_in, &fput_in)))
+			goto out;
+		if ((in->f_mode & FMODE_READ))
+			error = do_splice(in, off_in, out, off_out, len, flags);
 		fput_light(in, fput_in);
 	}
-
+out:
+	fput_light(out, fput_out);
 	return error;
 }
 
@@ -2210,7 +2224,7 @@
  */
 static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
 {
-	int ret;
+	int ret = 0;
 
 	/*
 	 * Check ->nrbufs without the inode lock first. This function
@@ -2219,7 +2233,6 @@
 	if (pipe->nrbufs)
 		return 0;
 
-	ret = 0;
 	pipe_lock(pipe);
 
 	while (!pipe->nrbufs) {
@@ -2248,7 +2261,7 @@
  */
 static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
 {
-	int ret;
+	int ret = 0;
 
 	/*
 	 * Check ->nrbufs without the inode lock first. This function
@@ -2257,7 +2270,6 @@
 	if (pipe->nrbufs < pipe->buffers)
 		return 0;
 
-	ret = 0;
 	pipe_lock(pipe);
 
 	while (pipe->nrbufs >= pipe->buffers) {
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index fc8a3ff..d756d92 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -26,6 +26,23 @@
 	     (bit) < (size); \
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
 
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit));      \
+		(bit) < (size);                                    \
+		(bit) = find_next_bit((addr), (size), (bit) + 1))
+
+#define for_each_clear_bit(bit, addr, size) \
+	for ((bit) = find_first_zero_bit((addr), (size));       \
+		(bit) < (size);                                    \
+		(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_clear_bit() but use bit as value to start with */
+#define for_each_clear_bit_from(bit, addr, size) \
+	for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
+		(bit) < (size);                                    \
+		(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
 static __inline__ int get_bitmask_order(unsigned int count)
 {
 	int order;
diff --git a/include/linux/cryptodev.h b/include/linux/cryptodev.h
new file mode 100644
index 0000000..9185082
--- /dev/null
+++ b/include/linux/cryptodev.h
@@ -0,0 +1,561 @@
+/*	$FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $	*/
+/*	$OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $	*/
+
+/*-
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ *
+ * Copyright (c) 2001 Theo de Raadt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifndef _CRYPTO_CRYPTO_H_
+#define _CRYPTO_CRYPTO_H_
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+#include <linux/in.h>
+#endif
+
+
+/* Some initial values */
+#define CRYPTO_DRIVERS_INITIAL	4
+#define CRYPTO_SW_SESSIONS	32
+
+/* Hash values */
+#define NULL_HASH_LEN		0
+#define MD5_HASH_LEN		16
+#define SHA1_HASH_LEN		20
+#define RIPEMD160_HASH_LEN	20
+#define SHA2_256_HASH_LEN	32
+#define SHA2_384_HASH_LEN	48
+#define SHA2_512_HASH_LEN	64
+#define MD5_KPDK_HASH_LEN	16
+#define SHA1_KPDK_HASH_LEN	20
+/* Maximum hash algorithm result length */
+#define HASH_MAX_LEN		SHA2_512_HASH_LEN /* Keep this updated */
+
+/* HMAC values */
+#define NULL_HMAC_BLOCK_LEN			1
+#define MD5_HMAC_BLOCK_LEN			64
+#define SHA1_HMAC_BLOCK_LEN			64
+#define RIPEMD160_HMAC_BLOCK_LEN	64
+#define SHA2_256_HMAC_BLOCK_LEN		64
+#define SHA2_384_HMAC_BLOCK_LEN		128
+#define SHA2_512_HMAC_BLOCK_LEN		128
+/* Maximum HMAC block length */
+#define HMAC_MAX_BLOCK_LEN		SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
+#define HMAC_IPAD_VAL			0x36
+#define HMAC_OPAD_VAL			0x5C
+
+/* Encryption algorithm block sizes */
+#define NULL_BLOCK_LEN			1
+#define DES_BLOCK_LEN			8
+#define DES3_BLOCK_LEN			8
+#define BLOWFISH_BLOCK_LEN		8
+#define SKIPJACK_BLOCK_LEN		8
+#define CAST128_BLOCK_LEN		8
+#define RIJNDAEL128_BLOCK_LEN	16
+#define AES_BLOCK_LEN			RIJNDAEL128_BLOCK_LEN
+#define CAMELLIA_BLOCK_LEN		16
+#define ARC4_BLOCK_LEN			1
+#define EALG_MAX_BLOCK_LEN		AES_BLOCK_LEN /* Keep this updated */
+
+/* Encryption algorithm min and max key sizes */
+#define NULL_MIN_KEY_LEN		0
+#define NULL_MAX_KEY_LEN		0
+#define DES_MIN_KEY_LEN			8
+#define DES_MAX_KEY_LEN			8
+#define DES3_MIN_KEY_LEN		24
+#define DES3_MAX_KEY_LEN		24
+#define BLOWFISH_MIN_KEY_LEN	4
+#define BLOWFISH_MAX_KEY_LEN	56
+#define SKIPJACK_MIN_KEY_LEN	10
+#define SKIPJACK_MAX_KEY_LEN	10
+#define CAST128_MIN_KEY_LEN		5
+#define CAST128_MAX_KEY_LEN		16
+#define RIJNDAEL128_MIN_KEY_LEN	16
+#define RIJNDAEL128_MAX_KEY_LEN	32
+#define AES_MIN_KEY_LEN			RIJNDAEL128_MIN_KEY_LEN
+#define AES_MAX_KEY_LEN			RIJNDAEL128_MAX_KEY_LEN
+#define CAMELLIA_MIN_KEY_LEN	16
+#define CAMELLIA_MAX_KEY_LEN	32
+#define ARC4_MIN_KEY_LEN		1
+#define ARC4_MAX_KEY_LEN		256
+
+/* Max size of data that can be processed */
+#define CRYPTO_MAX_DATA_LEN		64*1024 - 1
+
+#define CRYPTO_ALGORITHM_MIN	1
+#define CRYPTO_DES_CBC			1
+#define CRYPTO_3DES_CBC			2
+#define CRYPTO_BLF_CBC			3
+#define CRYPTO_CAST_CBC			4
+#define CRYPTO_SKIPJACK_CBC		5
+#define CRYPTO_MD5_HMAC			6
+#define CRYPTO_SHA1_HMAC		7
+#define CRYPTO_RIPEMD160_HMAC	8
+#define CRYPTO_MD5_KPDK			9
+#define CRYPTO_SHA1_KPDK		10
+#define CRYPTO_RIJNDAEL128_CBC	11 /* 128 bit blocksize */
+#define CRYPTO_AES_CBC			11 /* 128 bit blocksize -- the same as above */
+#define CRYPTO_ARC4				12
+#define CRYPTO_MD5				13
+#define CRYPTO_SHA1				14
+#define CRYPTO_NULL_HMAC		15
+#define CRYPTO_NULL_CBC			16
+#define CRYPTO_DEFLATE_COMP		17 /* Deflate compression algorithm */
+#define CRYPTO_SHA2_256_HMAC	18
+#define CRYPTO_SHA2_384_HMAC	19
+#define CRYPTO_SHA2_512_HMAC	20
+#define CRYPTO_CAMELLIA_CBC		21
+#define CRYPTO_SHA2_256			22
+#define CRYPTO_SHA2_384			23
+#define CRYPTO_SHA2_512			24
+#define CRYPTO_RIPEMD160		25
+#define	CRYPTO_LZS_COMP			26
+#define CRYPTO_ESP_RFC2406 		27
+//#define CRYPTO_ESP_RFC2406_TRANSPORT 20
+#define CRYPTO_ESP_RFC4303  		28
+#define CRYPTO_ESP4_RFC4303  		28
+#define CRYPTO_ESP6_RFC4303  		29
+#define CRYPTO_AH			30
+#define CRYPTO_AH4			30
+#define CRYPTO_AH6			31
+#define CRYPTO_SHA2_HMAC		32 /*TODO is it a duplicate entry*/
+#define CRYPTO_ALGORITHM_MAX		32 /* Keep updated - see below */
+
+/* Algorithm flags */
+#define CRYPTO_ALG_FLAG_SUPPORTED	0x01 /* Algorithm is supported */
+#define CRYPTO_ALG_FLAG_RNG_ENABLE	0x02 /* Has HW RNG for DH/DSA */
+#define CRYPTO_ALG_FLAG_DSA_SHA		0x04 /* Can do SHA on msg */
+
+/*
+ * Crypto driver/device flags.  They can set in the crid
+ * parameter when creating a session or submitting a key
+ * op to affect the device/driver assigned.  If neither
+ * of these are specified then the crid is assumed to hold
+ * the driver id of an existing (and suitable) device that
+ * must be used to satisfy the request.
+ */
+#define CRYPTO_FLAG_HARDWARE	0x01000000	/* hardware accelerated */
+#define CRYPTO_FLAG_SOFTWARE	0x02000000	/* software implementation */
+
+/* NB: deprecated */
+struct session_op {
+	u_int32_t	cipher;		/* ie. CRYPTO_DES_CBC */
+	u_int32_t	mac;		/* ie. CRYPTO_MD5_HMAC */
+
+	u_int32_t	keylen;		/* cipher key */
+	caddr_t		key;
+	int		mackeylen;	/* mac key */
+	caddr_t		mackey;
+
+  	u_int32_t	ses;		/* returns: session # */ 
+};
+
+struct session2_op {
+	u_int32_t	cipher;		/* ie. CRYPTO_DES_CBC */
+	u_int32_t	mac;		/* ie. CRYPTO_MD5_HMAC */
+
+	u_int32_t	keylen;		/* cipher key */
+	caddr_t		key;
+	int		mackeylen;	/* mac key */
+	caddr_t		mackey;
+
+  	u_int32_t	ses;		/* returns: session # */ 
+	int		crid;		/* driver id + flags (rw) */
+	int		pad[4];		/* for future expansion */
+};
+
+struct crypt_op {
+	u_int32_t	ses;
+	u_int16_t	op;		/* i.e. COP_ENCRYPT */
+#define COP_NONE	0
+#define COP_ENCRYPT	1
+#define COP_DECRYPT	2
+	u_int16_t	flags;
+#define	COP_F_BATCH	0x0008		/* Batch op if possible */
+	u_int		len;
+	caddr_t		src, dst;	/* become iov[] inside kernel */
+	caddr_t		mac;		/* must be big enough for chosen MAC */
+	caddr_t		iv;
+};
+
+/*
+ * Parameters for looking up a crypto driver/device by
+ * device name or by id.  The latter are returned for
+ * created sessions (crid) and completed key operations.
+ */
+struct crypt_find_op {
+	int		crid;		/* driver id + flags */
+	char		name[32];	/* device/driver name */
+};
+
+/* bignum parameter, in packed bytes, ... */
+struct crparam {
+	caddr_t		crp_p;
+	u_int		crp_nbits;
+};
+
+#define CRK_MAXPARAM	8
+
+struct crypt_kop {
+	u_int		crk_op;		/* ie. CRK_MOD_EXP or other */
+	u_int		crk_status;	/* return status */
+	u_short		crk_iparams;	/* # of input parameters */
+	u_short		crk_oparams;	/* # of output parameters */
+	u_int		crk_crid;	/* NB: only used by CIOCKEY2 (rw) */
+	struct crparam	crk_param[CRK_MAXPARAM];
+};
+#define CRK_ALGORITM_MIN	0
+#define CRK_MOD_EXP		0
+#define CRK_MOD_EXP_CRT		1
+#define CRK_DSA_SIGN		2
+#define CRK_DSA_VERIFY		3
+#define CRK_DH_COMPUTE_KEY	4
+#define CRK_ALGORITHM_MAX	4 /* Keep updated - see below */
+
+#define CRF_MOD_EXP		(1 << CRK_MOD_EXP)
+#define CRF_MOD_EXP_CRT		(1 << CRK_MOD_EXP_CRT)
+#define CRF_DSA_SIGN		(1 << CRK_DSA_SIGN)
+#define CRF_DSA_VERIFY		(1 << CRK_DSA_VERIFY)
+#define CRF_DH_COMPUTE_KEY	(1 << CRK_DH_COMPUTE_KEY)
+
+/*
+ * done against open of /dev/crypto, to get a cloned descriptor.
+ * Please use F_SETFD against the cloned descriptor.
+ */
+#define CRIOGET		_IOWR('c', 100, u_int32_t)
+#define CRIOASYMFEAT	CIOCASYMFEAT
+#define CRIOFINDDEV	CIOCFINDDEV
+
+/* the following are done against the cloned descriptor */
+#define CIOCGSESSION	_IOWR('c', 101, struct session_op)
+#define CIOCFSESSION	_IOW('c', 102, u_int32_t)
+#define CIOCCRYPT	_IOWR('c', 103, struct crypt_op)
+#define CIOCKEY		_IOWR('c', 104, struct crypt_kop)
+#define CIOCASYMFEAT	_IOR('c', 105, u_int32_t)
+#define CIOCGSESSION2	_IOWR('c', 106, struct session2_op)
+#define CIOCKEY2	_IOWR('c', 107, struct crypt_kop)
+#define CIOCFINDDEV	_IOWR('c', 108, struct crypt_find_op)
+
+struct cryptotstat {
+	struct timespec	acc;		/* total accumulated time */
+	struct timespec	min;		/* min time */
+	struct timespec	max;		/* max time */
+	u_int32_t	count;		/* number of observations */
+};
+
+struct cryptostats {
+	u_int32_t	cs_ops;		/* symmetric crypto ops submitted */
+	u_int32_t	cs_errs;	/* symmetric crypto ops that failed */
+	u_int32_t	cs_kops;	/* asymmetric/key ops submitted */
+	u_int32_t	cs_kerrs;	/* asymmetric/key ops that failed */
+	u_int32_t	cs_intrs;	/* crypto swi thread activations */
+	u_int32_t	cs_rets;	/* crypto return thread activations */
+	u_int32_t	cs_blocks;	/* symmetric op driver block */
+	u_int32_t	cs_kblocks;	/* symmetric op driver block */
+	/*
+	 * When CRYPTO_TIMING is defined at compile time and the
+	 * sysctl debug.crypto is set to 1, the crypto system will
+	 * accumulate statistics about how long it takes to process
+	 * crypto requests at various points during processing.
+	 */
+	struct cryptotstat cs_invoke;	/* crypto_dispatch -> crypto_invoke */
+	struct cryptotstat cs_done;	/* crypto_invoke -> crypto_done */
+	struct cryptotstat cs_cb;	/* crypto_done -> callback */
+	struct cryptotstat cs_finis;	/* callback -> callback return */
+
+	u_int32_t	cs_drops;		/* crypto ops dropped due to congestion */
+};
+
+#ifdef __KERNEL__
+
+/* Standard initialization structure beginning */
+struct cryptoini {
+	int		cri_alg;	/* Algorithm to use */
+	int		cri_flags;
+	union {
+		struct {
+			int		cri_mlen;	/* Number of bytes we want from the
+					   entire hash. 0 means all. */
+			int			cri_klen;	/* Key length, in bits */
+			caddr_t		cri_key;	/* key to use */
+			u_int8_t	cri_iv[EALG_MAX_BLOCK_LEN];	/* IV to use */
+		} cri_alg;
+		struct {
+			u_int32_t basealg;
+			struct sockaddr_in tun_source;
+			struct sockaddr_in tun_destination;
+			int tun_df_mode;
+			int tun_ds_mode;
+		 	int tun_ttl_value;
+		 	int tun_replay_windowsize;
+		 	int spivalue ;
+		 	int replayinit;  /* set to 0 to disable replay on receive */
+		 	u_int64_t time_hard_lifetime;
+		 	u_int64_t time_soft_lifetime;
+		 	u_int64_t byte_hard_lifetime;
+		 	u_int64_t byte_soft_lifetime;
+		} cri_pack;	
+	} u;
+	struct cryptoini *cri_next;
+};
+#define cri_mlen		u.cri_alg.cri_mlen
+#define cri_klen		u.cri_alg.cri_klen
+#define cri_key			u.cri_alg.cri_key
+#define cri_iv			u.cri_alg.cri_iv
+#define crip_basealg			u.cri_pack.basealg
+#define crip_tun_source 		u.cri_pack.tun_source
+#define crip_tun_destination	u.cri_pack.tun_destination
+#define crip_tun_df_mode		u.cri_pack.tun_df_mode
+#define crip_tun_ds_mode	u.cri_pack.tun_ds_mode
+#define crip_tun_ttl_value	u.cri_pack.tun_ttl_value
+#define crip_tun_replay_windowsize u.cri_pack.tun_replay_windowsize
+#define crip_spivalue 		u.cri_pack.spivalue
+#define crip_replayinit		u.cri_pack.replayinit
+#define crip_time_hard_lifetime 	 u.cri_pack.time_hard_lifetime
+#define crip_time_soft_lifetime 	 u.cri_pack.time_soft_lifetime
+#define crip_byte_hard_lifetime 	 u.cri_pack.byte_hard_lifetime
+#define crip_byte_soft_lifetime 	 u.cri_pack.byte_soft_lifetime
+
+/* Describe boundaries of a single crypto operation */
+struct cryptodesc {
+	int		crd_skip;	/* How many bytes to ignore from start */
+	int		crd_len;	/* How many bytes to process */
+	int		crd_inject;	/* Where to inject results, if applicable */
+	int		crd_flags;
+
+#define CRD_F_ENCRYPT		0x01	/* Set when doing encryption */
+#define CRD_F_IV_PRESENT	0x02	/* When encrypting, IV is already in
+					   place, so don't copy. */
+#define CRD_F_IV_EXPLICIT	0x04	/* IV explicitly provided */
+#define CRD_F_DSA_SHA_NEEDED	0x08	/* Compute SHA-1 of buffer for DSA */
+#define CRD_F_KEY_EXPLICIT	0x10	/* Key explicitly provided */
+#define CRD_F_COMP		0x0f    /* Set when doing compression */
+
+	struct cryptoini	CRD_INI; /* Initialization/context data */
+#define crd_iv		CRD_INI.cri_iv
+#define crd_key		CRD_INI.cri_key
+#define crd_alg		CRD_INI.cri_alg
+#define crd_klen	CRD_INI.cri_klen
+#define crd_mlen	CRD_INI.cri_mlen
+
+	struct cryptodesc *crd_next;
+};
+
+/* Structure describing complete operation */
+struct cryptop {
+	struct list_head crp_next;
+	wait_queue_head_t crp_waitq;
+
+	u_int64_t	crp_sid;	/* Session ID */
+	int		crp_ilen;	/* Input data total length */
+	int		crp_olen;	/* Result total length */
+
+	int		crp_etype;	/*
+					 * Error type (zero means no error).
+					 * All error codes except EAGAIN
+					 * indicate possible data corruption (as in,
+					 * the data have been touched). On all
+					 * errors, the crp_sid may have changed
+					 * (reset to a new one), so the caller
+					 * should always check and use the new
+					 * value on future requests.
+					 */
+	int		crp_flags;
+
+#define CRYPTO_F_SKBUF		0x0001	/* Input/output are skbuf chains */
+#define CRYPTO_F_IOV		0x0002	/* Input/output are uio */
+#define CRYPTO_F_REL		0x0004	/* Must return data in same place */
+#define CRYPTO_F_BATCH		0x0008	/* Batch op if possible */
+#define CRYPTO_F_CBIMM		0x0010	/* Do callback immediately */
+#define CRYPTO_F_DONE		0x0020	/* Operation completed */
+#define CRYPTO_F_CBIFSYNC	0x0040	/* Do CBIMM if op is synchronous */
+
+	caddr_t		crp_buf;	/* Data to be processed */
+	caddr_t		crp_out_buf;	/* Crypto Result Buffer */
+	caddr_t		crp_opaque;	/* Opaque pointer, passed along */
+	struct cryptodesc *crp_desc;	/* Linked list of processing descriptors */
+
+	int (*crp_callback)(struct cryptop *); /* Callback function */
+};
+enum crypto_packet_return_code {
+		CRYPTO_OK=0,
+		CRYPTO_SOFT_TTL = 2,
+ 		CRYPTO_HARD_TTL,
+ 		CRYPTO_SA_INACTIVE,
+ 		CRYPTO_REPLAY,
+ 		CRYPTO_ICV_FAIL,
+ 		CRYPTO_SEQ_ROLL,
+ 		CRYPTO_MEM_ERROR,
+ 		CRYPTO_VERS_ERROR,
+ 		CRYPTO_PROT_ERROR,
+ 		CRYPTO_PYLD_ERROR,
+ 		CRYPTO_PAD_ERROR 
+};
+
+enum crypto_accel_type {
+                  CRYPTO_PACKET  =0x2,    /* OR together desired bits */
+                  CRYPTO_HARDWARE=0x1,
+                  CRYPTO_SOFTWARE=0x0
+};
+
+enum crypto_flags {
+                  CRYPTO_ENCRYPT=0x1, 	// same for encap (OCF l2)
+                  CRYPTO_DECRYPT=0x2,		// same for decap (OCF l2)
+                  CRYPTO_MAC_GEN=0x4,
+                  CRYPTO_MAC_CHECK=0x08,
+                  CRYPTO_COMPRESS_SMALLER=0x10,
+                  CRYPTO_COMPRESS_BIGGER=0x20
+};
+
+#define CRYPTO_BUF_CONTIG	0x0
+#define CRYPTO_BUF_IOV		0x1
+#define CRYPTO_BUF_SKBUF		0x2
+
+#define CRYPTO_OP_DECRYPT	0x0
+#define CRYPTO_OP_ENCRYPT	0x1
+
+/*
+ * Hints passed to process methods.
+ */
+#define CRYPTO_HINT_MORE	0x1	/* more ops coming shortly */
+
+struct cryptkop {
+	struct list_head krp_next;
+	wait_queue_head_t krp_waitq;
+
+	int		krp_flags;
+#define CRYPTO_KF_DONE		0x0001	/* Operation completed */
+#define CRYPTO_KF_CBIMM		0x0002	/* Do callback immediately */
+
+	u_int		krp_op;		/* ie. CRK_MOD_EXP or other */
+	u_int		krp_status;	/* return status */
+	u_short		krp_iparams;	/* # of input parameters */
+	u_short		krp_oparams;	/* # of output parameters */
+	u_int		krp_crid;	/* desired device, etc. */
+	u_int32_t	krp_hid;
+	struct crparam	krp_param[CRK_MAXPARAM];	/* kvm */
+	int		(*krp_callback)(struct cryptkop *);
+};
+
+#include <linux/ocf-compat.h>
+
+/*
+ * Session ids are 64 bits.  The lower 32 bits contain a "local id" which
+ * is a driver-private session identifier.  The upper 32 bits contain a
+ * "hardware id" used by the core crypto code to identify the driver and
+ * a copy of the driver's capabilities that can be used by client code to
+ * optimize operation.
+ */
+#define CRYPTO_SESID2HID(_sid)	(((_sid) >> 32) & 0x00ffffff)
+#define CRYPTO_SESID2CAPS(_sid)	(((_sid) >> 32) & 0xff000000)
+#define CRYPTO_SESID2LID(_sid)	(((u_int32_t) (_sid)) & 0xffffffff)
+
+extern	int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
+extern	int crypto_freesession(u_int64_t sid);
+#define CRYPTOCAP_F_HARDWARE	CRYPTO_FLAG_HARDWARE
+#define CRYPTOCAP_F_SOFTWARE	CRYPTO_FLAG_SOFTWARE
+#define CRYPTOCAP_F_SYNC	0x04000000	/* operates synchronously */
+extern	int32_t crypto_get_driverid(device_t dev, int flags);
+extern	int crypto_find_driver(const char *);
+extern	device_t crypto_find_device_byhid(int hid);
+extern	int crypto_getcaps(int hid);
+extern	int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
+	    u_int32_t flags);
+extern	int crypto_kregister(u_int32_t, int, u_int32_t);
+extern	int crypto_unregister(u_int32_t driverid, int alg);
+extern	int crypto_unregister_all(u_int32_t driverid);
+extern	int crypto_dispatch(struct cryptop *crp);
+extern	int crypto_kdispatch(struct cryptkop *);
+#define CRYPTO_SYMQ	0x1
+#define CRYPTO_ASYMQ	0x2
+extern	int crypto_unblock(u_int32_t, int);
+extern	void crypto_done(struct cryptop *crp);
+extern	void crypto_kdone(struct cryptkop *);
+extern	int crypto_getfeat(int *);
+
+extern	void crypto_freereq(struct cryptop *crp);
+extern	struct cryptop *crypto_getreq(int num);
+
+extern  int crypto_usercrypto;      /* userland may do crypto requests */
+extern  int crypto_userasymcrypto;  /* userland may do asym crypto reqs */
+extern  int crypto_devallowsoft;    /* only use hardware crypto */
+
+/*
+ * random number support,  crypto_unregister_all will unregister
+ */
+extern int crypto_rregister(u_int32_t driverid,
+		int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg);
+extern int crypto_runregister_all(u_int32_t driverid);
+
+/*
+ * Crypto-related utility routines used mainly by drivers.
+ *
+ * XXX these don't really belong here; but for now they're
+ *     kept apart from the rest of the system.
+ */
+struct uio;
+extern	void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
+extern	void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
+extern	struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
+
+extern	void crypto_copyback(int flags, caddr_t buf, int off, int size,
+	    caddr_t in);
+extern	void crypto_copydata(int flags, caddr_t buf, int off, int size,
+	    caddr_t out);
+extern	int crypto_apply(int flags, caddr_t buf, int off, int len,
+	    int (*f)(void *, void *, u_int), void *arg);
+
+#endif /* __KERNEL__ */
+#endif /* _CRYPTO_CRYPTO_H_ */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c8a9e40..2573b1e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -410,6 +410,9 @@
 struct vm_area_struct;
 struct vfsmount;
 struct cred;
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
+struct socket;
+#endif
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
@@ -1615,6 +1618,10 @@
 	int (*flock) (struct file *, int, struct file_lock *);
 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
 	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
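+	/* receive data from a socket directly into this file */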
+	ssize_t (*splice_from_socket)(struct file *file, struct socket *sock,
+				     loff_t __user *ppos, size_t count);
+#endif
 	int (*setlease)(struct file *, long, struct file_lock **);
 	long (*fallocate)(struct file *file, int mode, loff_t offset,
 			  loff_t len);
diff --git a/include/linux/if.h b/include/linux/if.h
index db20bd4..d8498f6 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -52,6 +52,9 @@
 #define IFF_DORMANT	0x20000		/* driver signals dormant	*/
 
 #define IFF_ECHO	0x40000		/* echo sent packets		*/
+#if defined(CONFIG_ARCH_COMCERTO)
+#define IFF_WIFI_OFLD	0x80000		/* Offload interface		*/
+#endif
 
 #define IFF_VOLATILE	(IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
 		IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index dd3f201..7a8bb26 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -106,6 +106,22 @@
 typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
+#if defined(CONFIG_ARCH_COMCERTO)
+struct brevent_fdb_update {
+	char *mac_addr;
+	struct net_device *dev;
+};
+
+enum brevent_notif_type {
+	BREVENT_PORT_DOWN = 1,	/* arg is struct net_device ptr */
+	BREVENT_FDB_UPDATE	/* arg is struct brevent_fdb_update ptr */
+};
+
+int register_brevent_notifier(struct notifier_block *nb);
+int unregister_brevent_notifier(struct notifier_block *nb);
+int call_brevent_notifiers(unsigned long val, void *v);
+#endif
+
 #endif
 
 #endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3fa93b1..b11401d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1005,6 +1005,11 @@
 	/* mask of features inheritable by VLAN devices */
 	u32			vlan_features;
 
+#if defined(CONFIG_ARCH_COMCERTO)
+	/* This is pointing to network device that offload WiFi data to PFE */
+	struct net_device 	*wifi_offload_dev;
+#endif
+
 	/* Net device feature bits; if you change something,
 	 * also update netdev_features_strings[] in ethtool.c */
 
@@ -1620,6 +1625,9 @@
 extern int		dev_close(struct net_device *dev);
 extern void		dev_disable_lro(struct net_device *dev);
 extern int		dev_queue_xmit(struct sk_buff *skb);
+#if defined(CONFIG_ARCH_COMCERTO)
+extern int 		original_dev_queue_xmit(struct sk_buff *skb);
+#endif
 extern int		register_netdevice(struct net_device *dev);
 extern void		unregister_netdevice_queue(struct net_device *dev,
 						   struct list_head *head);
diff --git a/include/linux/netfilter_ipv6/ip6t_NPT.h b/include/linux/netfilter_ipv6/ip6t_NPT.h
old mode 100755
new mode 100644
diff --git a/include/linux/ocf-compat.h b/include/linux/ocf-compat.h
new file mode 100644
index 0000000..4ad1223
--- /dev/null
+++ b/include/linux/ocf-compat.h
@@ -0,0 +1,372 @@
+#ifndef _BSD_COMPAT_H_
+#define _BSD_COMPAT_H_ 1
+/****************************************************************************/
+/*
+ * Provide compat routines for older linux kernels and BSD kernels
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2010 David McCullough <david_mccullough@mcafee.com>
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ *   1. distributions of this source code include the above copyright
+ *      notice, this list of conditions and the following disclaimer;
+ *
+ *   2. distributions in binary form include the above copyright
+ *      notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other associated materials;
+ *
+ *   3. the copyright holder's name is not used to endorse products
+ *      built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this file
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ */
+/****************************************************************************/
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+
+/*
+ * fake some BSD driver interface stuff specifically for OCF use
+ */
+
+typedef struct ocf_device *device_t;
+
+typedef struct {
+	int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
+	int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
+	int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
+	int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
+} device_method_t;
+#define DEVMETHOD(id, func)	id: func
+
+struct ocf_device {
+	char name[32];		/* the driver name */
+	char nameunit[32];	/* the driver name + HW instance */
+	int  unit;
+	device_method_t	methods;
+	void *softc;
+};
+
+#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
+	((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
+#define CRYPTODEV_FREESESSION(dev, sid) \
+	((*(dev)->methods.cryptodev_freesession)(dev, sid))
+#define CRYPTODEV_PROCESS(dev, crp, hint) \
+	((*(dev)->methods.cryptodev_process)(dev, crp, hint))
+#define CRYPTODEV_KPROCESS(dev, krp, hint) \
+	((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))
+
+#define device_get_name(dev)	((dev)->name)
+#define device_get_nameunit(dev)	((dev)->nameunit)
+#define device_get_unit(dev)	((dev)->unit)
+#define device_get_softc(dev)	((dev)->softc)
+
+#define	softc_device_decl \
+		struct ocf_device _device; \
+		device_t
+
+#define	softc_device_init(_sc, _name, _unit, _methods) \
+	if (1) {\
+	strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
+	snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.nameunit), "%s%d", _name, _unit); \
+	(_sc)->_device.unit = _unit; \
+	(_sc)->_device.methods = _methods; \
+	(_sc)->_device.softc = (void *) _sc; \
+	*(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
+	} else
+
+#define	softc_get_device(_sc)	(&(_sc)->_device)
+
+/*
+ * iomem support for 2.4 and 2.6 kernels
+ */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define ocf_iomem_t	unsigned long
+
+/*
+ * implement simple workqueue like support for older kernels
+ */
+
+#include <linux/tqueue.h>
+
+#define work_struct tq_struct
+
+#define INIT_WORK(wp, fp, ap) \
+	do { \
+		(wp)->sync = 0; \
+		(wp)->routine = (fp); \
+		(wp)->data = (ap); \
+	} while (0)
+
+#define schedule_work(wp) \
+	do { \
+		queue_task((wp), &tq_immediate); \
+		mark_bh(IMMEDIATE_BH); \
+	} while (0)
+
+#define flush_scheduled_work()	run_task_queue(&tq_immediate)
+
+#else
+#define ocf_iomem_t	void __iomem *
+
+#include <linux/workqueue.h>
+
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/fdtable.h>
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
+#define files_fdtable(files)	(files)
+#endif
+
+#ifdef MODULE_PARM
+#undef module_param	/* just in case */
+#define	module_param(a,b,c)		MODULE_PARM(a,"i")
+#endif
+
+#define bzero(s,l)		memset(s,0,l)
+#define bcopy(s,d,l)	memcpy(d,s,l)
+#define bcmp(x, y, l)	memcmp(x,y,l)
+
+#define MIN(x,y)	((x) < (y) ? (x) : (y))
+
+#define device_printf(dev, a...) ({ \
+				printk("%s: ", device_get_nameunit(dev)); printk(a); \
+			})
+
+#undef printf
+#define printf(fmt...)	printk(fmt)
+
+#define KASSERT(c,p)	if (!(c)) { printk p ; } else
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define ocf_daemonize(str) \
+	daemonize(); \
+	spin_lock_irq(&current->sigmask_lock); \
+	sigemptyset(&current->blocked); \
+	recalc_sigpending(current); \
+	spin_unlock_irq(&current->sigmask_lock); \
+	sprintf(current->comm, str);
+#else
+#define ocf_daemonize(str) daemonize(str);
+#endif
+
+#define	TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
+#define	TAILQ_EMPTY(q)	list_empty(q)
+#define	TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)
+
+#define read_random(p,l) get_random_bytes(p,l)
+
+#define DELAY(x)	((x) > 2000 ? mdelay((x)/1000) : udelay(x))
+#define strtoul simple_strtoul
+
+#define pci_get_vendor(dev)	((dev)->vendor)
+#define pci_get_device(dev)	((dev)->device)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define pci_set_consistent_dma_mask(dev, mask) (0)
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
+#define pci_dma_sync_single_for_cpu pci_dma_sync_single
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK  0x00000000ffffffffULL
+#endif
+
+#ifndef htole32
+#define htole32(x)	cpu_to_le32(x)
+#endif
+#ifndef htobe32
+#define htobe32(x)	cpu_to_be32(x)
+#endif
+#ifndef htole16
+#define htole16(x)	cpu_to_le16(x)
+#endif
+#ifndef htobe16
+#define htobe16(x)	cpu_to_be16(x)
+#endif
+
+/* older kernels don't have these */
+
+#include <asm/irq.h>
+#if !defined(IRQ_NONE) && !defined(IRQ_RETVAL)
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_WAKE_THREAD
+#define IRQ_RETVAL
+#define irqreturn_t void
+typedef irqreturn_t (*irq_handler_t)(int irq, void *arg, struct pt_regs *regs);
+#endif
+#ifndef IRQF_SHARED
+#define IRQF_SHARED	SA_SHIRQ
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+# define strlcpy(dest,src,len) \
+		({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
+#endif
+
+#ifndef MAX_ERRNO
+#define MAX_ERRNO	4095
+#endif
+#ifndef IS_ERR_VALUE
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,5)
+#include <linux/err.h>
+#endif
+#ifndef IS_ERR_VALUE
+#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
+#endif
+#endif
+
+/*
+ * common debug for all
+ */
+#if 1
+#define dprintk(a...)	do { if (debug) printk(a); } while(0)
+#else
+#define dprintk(a...)
+#endif
+
+#ifndef SLAB_ATOMIC
+/* Changed in 2.6.20, must use GFP_ATOMIC now */
+#define	SLAB_ATOMIC	GFP_ATOMIC
+#endif
+
+/*
+ * need some additional support for older kernels
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
+#define pci_register_driver_compat(driver, rc) \
+	do { \
+		if ((rc) > 0) { \
+			(rc) = 0; \
+		} else if (rc == 0) { \
+			(rc) = -ENODEV; \
+		} else { \
+			pci_unregister_driver(driver); \
+		} \
+	} while (0)
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
+#define pci_register_driver_compat(driver,rc) ((rc) = (rc) < 0 ? (rc) : 0)
+#else
+#define pci_register_driver_compat(driver,rc)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+
+#include <linux/mm.h>
+#include <asm/scatterlist.h>
+
+static inline void sg_set_page(struct scatterlist *sg,  struct page *page,
+			       unsigned int len, unsigned int offset)
+{
+	sg->page = page;
+	sg->offset = offset;
+	sg->length = len;
+}
+
+static inline void *sg_virt(struct scatterlist *sg)
+{
+	return page_address(sg->page) + sg->offset;
+}
+
+#define sg_init_table(sg, n)
+
+#define sg_mark_end(sg)
+
+#endif
+
+#ifndef late_initcall
+#define late_initcall(init) module_init(init)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) || !defined(CONFIG_SMP)
+#define ocf_for_each_cpu(cpu) for ((cpu) = 0; (cpu) == 0; (cpu)++)
+#else
+#define ocf_for_each_cpu(cpu) for_each_present_cpu(cpu)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#include <linux/sched.h>
+#define	kill_proc(p,s,v)	send_sig(s,find_task_by_vpid(p),0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
+
+struct ocf_thread {
+	struct task_struct	*task;
+	int					(*func)(void *arg);
+	void				*arg;
+};
+
+/* thread startup helper func */
+static inline int ocf_run_thread(void *arg)
+{
+	struct ocf_thread *t = (struct ocf_thread *) arg;
+	if (!t)
+		return -1; /* very bad */
+	t->task = current;
+	daemonize();
+	spin_lock_irq(&current->sigmask_lock);
+	sigemptyset(&current->blocked);
+	recalc_sigpending(current);
+	spin_unlock_irq(&current->sigmask_lock);
+	return (*t->func)(t->arg);
+}
+
+#define kthread_create(f,a,fmt...) \
+	({ \
+		struct ocf_thread t; \
+		pid_t p; \
+		t.task = NULL; \
+		t.func = (f); \
+		t.arg = (a); \
+		p = kernel_thread(ocf_run_thread, &t, CLONE_FS|CLONE_FILES); \
+		while (p != (pid_t) -1 && t.task == NULL) \
+			schedule(); \
+		if (t.task) \
+			snprintf(t.task->comm, sizeof(t.task->comm), fmt); \
+		(t.task); \
+	})
+
+#define kthread_bind(t,cpu)	/**/
+
+#define kthread_should_stop()	(strcmp(current->comm, "stopping") == 0)
+
+#define kthread_stop(t) \
+	({ \
+		strcpy((t)->comm, "stopping"); \
+		kill_proc((t)->pid, SIGTERM, 1); \
+		do { \
+			schedule(); \
+		} while (kill_proc((t)->pid, SIGTERM, 1) == 0); \
+	})
+
+#else
+#include <linux/kthread.h>
+#endif
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
+#define	skb_frag_page(x)	((x)->page)
+#endif
+
+#endif /* __KERNEL__ */
+
+/****************************************************************************/
+#endif /* _BSD_COMPAT_H_ */
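
Usage sketch (not part of the patch; driver names illustrative): an OCF driver built on these compat shims embeds the fake BSD device in its softc, fills a device_method_t with DEVMETHOD() entries, and wires everything up with softc_device_init().

    struct mydrv_softc {
    	softc_device_decl	sc_cdev;	/* expands to ocf_device + device_t member */
    	/* ... driver-private state ... */
    };

    static int mydrv_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
    static int mydrv_freesession(device_t dev, u_int64_t tid);
    static int mydrv_process(device_t dev, struct cryptop *crp, int hint);

    static device_method_t mydrv_methods = {
    	DEVMETHOD(cryptodev_newsession,	 mydrv_newsession),
    	DEVMETHOD(cryptodev_freesession, mydrv_freesession),
    	DEVMETHOD(cryptodev_process,	 mydrv_process),
    };

    static void mydrv_setup(struct mydrv_softc *sc, int unit)
    {
    	softc_device_init(sc, "mydrv", unit, mydrv_methods);
    	device_printf(softc_get_device(sc), "ready\n");
    }
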
diff --git a/include/linux/of.h b/include/linux/of.h
index 4948552..7b3ed80 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -336,6 +336,22 @@
 #define of_match_node(_matches, _node)	NULL
 #endif /* CONFIG_OF */
 
+/**
+ * of_property_read_bool - Find a property
+ * @np:         device node from which the property value is to be read.
+ * @propname:   name of the property to be searched.
+ *
+ * Search for a property in a device node.
+ * Returns true if the property exists, false otherwise.
+ */
+static inline bool of_property_read_bool(const struct device_node *np,
+										const char *propname)
+{
+	struct property *prop = of_find_property(np, propname, NULL);
+
+	return prop ? true : false;
+}
+
 static inline int of_property_read_u32(const struct device_node *np,
 				       const char *propname,
 				       u32 *out_value)
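
Usage sketch (not part of the patch; the property name is illustrative): the helper simply reports presence of a property, so boolean device-tree flags can be tested directly.

    static void mydrv_parse_dt(const struct device_node *np)
    {
    	/* true when the node contains an (illustrative) "mydrv,big-endian" flag */
    	if (of_property_read_bool(np, "mydrv,big-endian"))
    		pr_info("mydrv: big-endian register access selected\n");
    }
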
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e90a673..35ff525 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -104,6 +104,9 @@
 #ifdef CONFIG_MEMORY_FAILURE
 	PG_hwpoison,		/* hardware poisoned page. Don't touch */
 #endif
+#ifdef CONFIG_RAID_ZERO_COPY
+        PG_constant,            /* const page not modified during raid5 io */
+#endif
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	PG_compound_lock,
 #endif
@@ -193,6 +196,16 @@
 
 struct page;	/* forward declaration */
 
+#ifdef CONFIG_RAID_ZERO_COPY
+#define PageConstant(page) test_bit(PG_constant, &(page)->flags)
+#define SetPageConstant(page) set_bit(PG_constant, &(page)->flags)
+#define ClearPageConstant(page) clear_bit(PG_constant, &(page)->flags)
+#define TestSetPageConstant(page) test_and_set_bit(PG_constant, &(page)->flags)
+extern void clear_page_constant(struct page *page);
+#endif
+
+
+
 TESTPAGEFLAG(Locked, locked)
 PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
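
Usage sketch (not part of the patch): the raid5 write-out path is expected to tag stable page-cache pages with PG_constant before referencing them zero-copy, and end_page_writeback() (patched in mm/filemap.c below) calls clear_page_constant() to drop the bit once the write completes. The helper below is illustrative only.

    #ifdef CONFIG_RAID_ZERO_COPY
    /* Illustrative: returns true if this caller was the one to set PG_constant,
     * i.e. the page may now be referenced directly for the stripe write. */
    static bool raid5_try_mark_page_constant(struct page *page)
    {
    	return !TestSetPageConstant(page);
    }
    #endif
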
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 80a0e22..6c2fb2f 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -750,6 +750,7 @@
    	__rta_reserve(skb, attrtype, attrlen); })
 
 extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
+extern void __rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
 
 /* RTNL is used as a global lock for all changes to network configuration  */
 extern void rtnl_lock(void);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 96348263..4b42eb7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2104,6 +2104,11 @@
 extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
 					       int offset, struct iovec *to,
 					       int size);
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
+extern int	       skb_copy_datagram_to_kernel_iovec(const struct sk_buff *from,
+					       int offset, struct iovec *to,
+					       int size);
+#endif
 extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 							int hlen,
 							struct iovec *iov);
diff --git a/include/linux/socket.h b/include/linux/socket.h
index ad919e0..ce12aa5 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -195,7 +195,10 @@
 #define AF_CAIF		37	/* CAIF sockets			*/
 #define AF_ALG		38	/* Algorithm sockets		*/
 #define AF_NFC		39	/* NFC sockets			*/
-#define AF_MAX		40	/* For now.. */
+#define AF_COMA		40	/* COMA sockets			*/
+#define AF_MAX		41	/* For now.. */
+
+
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC	AF_UNSPEC
@@ -238,8 +241,10 @@
 #define PF_CAIF		AF_CAIF
 #define PF_ALG		AF_ALG
 #define PF_NFC		AF_NFC
+#define PF_COMA		AF_COMA
 #define PF_MAX		AF_MAX
 
+
 /* Maximum queue length specifiable by listen.  */
 #define SOMAXCONN	128
 
@@ -266,6 +271,10 @@
 #define MSG_MORE	0x8000	/* Sender will send more */
 #define MSG_WAITFORONE	0x10000	/* recvmmsg(): block until 1+ packets avail */
 #define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
+#define MSG_KERNSPACE   0x40000
+#define MSG_NOCATCHSIG	0x80000
+#endif
 #define MSG_EOF         MSG_FIN
 
 #define MSG_CMSG_CLOEXEC 0x40000000	/* Set close_on_exit for file
@@ -332,6 +341,9 @@
 			     int offset, int len);
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr);
 extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
+extern void memcpy_tokerneliovec(struct iovec *iov, unsigned char *kdata, int len);
+#endif
 
 struct timespec;
 
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index a88fb69..ea6f8a4 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -18,6 +18,7 @@
 	u16 ctmask;		/* bitmask of ct events to be delivered */
 	u16 expmask;		/* bitmask of expect events to be delivered */
 	u32 pid;		/* netlink pid of destroyer */
+	struct timer_list timeout;
 };
 
 static inline struct nf_conntrack_ecache *
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index f49d132..22b499b 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -163,7 +163,7 @@
 }
 
 static ssize_t comcerto_mdma_prof_enable_store(struct kobject *kobj,
-				  struct kobj_attribute *attr, char *buf, size_t count)
+				  struct kobj_attribute *attr, const char *buf, size_t count)
 {
 	unsigned int enable;
 
@@ -271,7 +271,7 @@
 	return (n + 1);
 }
 static ssize_t comcerto_splice_prof_enable_store(struct kobject *kobj,
-				  struct kobj_attribute *attr, char *buf, size_t count)
+				  struct kobj_attribute *attr, const char *buf, size_t count)
 {
 	unsigned int enable;
 
@@ -455,10 +455,9 @@
 #endif
 
 #if defined(CONFIG_COMCERTO_AHCI_PROF)
-extern unsigned int ahci_time_counter[256]; // 4 ms -> 1S
-extern unsigned int ahci_data_counter[256]; 
-extern unsigned int init_ahci_prof;
-extern unsigned int enable_ahci_prof;
+
+#include "../drivers/ata/ahci.h"
+
 static ssize_t comcerto_ahci_prof_enable_show(struct kobject *kobj,
 				  struct kobj_attribute *attr, char *buf)
 {
@@ -473,7 +472,7 @@
 	return (n + 1);
 }
 static ssize_t comcerto_ahci_prof_enable_store(struct kobject *kobj,
-				  struct kobj_attribute *attr, char *buf, size_t count)
+				  struct kobj_attribute *attr, const char *buf, size_t count)
 {
 	unsigned int enable;
 
@@ -491,90 +490,129 @@
 static ssize_t comcerto_ahci_timing_show(struct kobject *kobj,
 				  struct kobj_attribute *attr, char *buf)
 {
-	int i;
-	int n;
+	int i, n, p;
 
 	buf[0] = '\0';
 	n = 0;
-	n += sprintf(buf, "Histogram of inter ahci write time (up to 1 sec otherwise date is discarded)\n");
-	init_ahci_prof = 0;
-	for (i = 0; i < 255; i++)
-	{
-		if (ahci_time_counter[i]) {
-			n += sprintf(buf + n, "%d in [%d-%d] ms\n", ahci_time_counter[i], (i * 8), (i * 8) + 8);
-			ahci_time_counter[i] = 0;
+	n += sprintf(buf, "Histogram of AHCI inter-request time (us)\n");
+
+	for (p = 0; p < MAX_AHCI_PORTS; p++) {
+		struct ahci_port_stats *stats = &ahci_port_stats[p];
+
+		n += sprintf(buf + n, "AHCI Port %d\n", p);
+
+		stats->init_prof = 0;
+
+		for (i = 0; i < MAX_BINS - 1; i++)
+		{
+			if (stats->time_counter[i]) {
+				n += sprintf(buf + n, "%8d in [%5d-%5d]\n", stats->time_counter[i], i << US_SHIFT, (i + 1) << US_SHIFT);
+				stats->time_counter[i] = 0;
+			}
+		}
+
+		if (stats->time_counter[MAX_BINS - 1]) {
+		 	n += sprintf(buf + n, "%d > %d us\n", stats->time_counter[MAX_BINS - 1], (MAX_BINS - 1) << US_SHIFT);
+			stats->time_counter[MAX_BINS - 1] = 0;
 		}
 	}
-	if (ahci_time_counter[255]) {
-	 	n += sprintf(buf + n, "%d > 1 second\n", ahci_time_counter[255]);
-		ahci_time_counter[255] = 0;
-	}
+
 	return (n + 1);
 }
 KERNEL_ATTR_RO(comcerto_ahci_timing);
 static ssize_t comcerto_ahci_data_show(struct kobject *kobj,
 				  struct kobj_attribute *attr, char *buf)
 {
-	int i;
-	int n;
+	int i, n, p;
 
 	buf[0] = '\0';
 	n = 0;
-	n += sprintf(buf, "Histogram of ahci write data length (up to 1M)\n");
+	n += sprintf(buf, "Histogram of AHCI request data lengths (KiB)\n");
 
-	for (i = 0; i < 256; i++)
-	{
-		if (ahci_data_counter[i]) {
-			n += sprintf(buf + n, "%d in [%d-%d] KB\n", ahci_data_counter[i], (i * 8), (i * 8) + 8);
-			ahci_data_counter[i] = 0;
+	for (p = 0; p < MAX_AHCI_PORTS; p++) {
+		struct ahci_port_stats *stats = &ahci_port_stats[p];
+
+		n += sprintf(buf + n, "AHCI Port %d\n", p);
+
+		for (i = 0; i < MAX_BINS; i++)
+		{
+			if (stats->data_counter[i]) {
+				n += sprintf(buf + n, "%8d in [%3d-%3d]\n", stats->data_counter[i], (i << BYTE_SHIFT) / 1024, ((i + 1) << BYTE_SHIFT) / 1024);
+				stats->data_counter[i] = 0;
+			}
 		}
 	}
+
 	return (n + 1);
 }
 KERNEL_ATTR_RO(comcerto_ahci_data);
 
 
-extern unsigned int ahci_qc_comp_counter[33];
-static ssize_t comcerto_ahci_qc_comp_timing_show(struct kobject *kobj,
+extern struct ahci_port_stats ahci_port_stats[MAX_AHCI_PORTS];
+static ssize_t comcerto_ahci_qc_rate_show(struct kobject *kobj,
 				  struct kobj_attribute *attr, char *buf)
 {
-	int i;
-	int n;
+	int i, n, p;
+	unsigned int mean_rate, total_kb;
 
 	buf[0] = '\0';
 	n = 0;
-	sprintf(buf, "Histogram of AHCI qc_complete time (in ms):\n");
-	n = strlen(buf);
-	for (i = 0; i < 32; i++)
-	{
-		if (ahci_qc_comp_counter[i]) {
-			sprintf(buf + n, "%d, in [%d-%d]ms\n",ahci_qc_comp_counter[i], (i * 16), (i * 16) + 16);
-			n = strlen(buf);
-			ahci_qc_comp_counter[i] = 0;
+	n += sprintf(buf, "Histogram of AHCI request completion rates (MiB per MiB/s bin):\n");
+
+	for (p = 0; p < MAX_AHCI_PORTS; p++) {
+		struct ahci_port_stats *stats = &ahci_port_stats[p];
+
+		n += sprintf(buf + n, "AHCI Port %d\n", p);
+		total_kb = 0;
+		mean_rate = 0;
+
+		for (i = 0; i < MAX_BINS; i++)
+		{
+			if (stats->rate_counter[i]) {
+				n += sprintf(buf + n, "%8d in [%3d-%3d]\n", stats->rate_counter[i] / 1024, i << RATE_SHIFT, (i + 1) << RATE_SHIFT);
+				mean_rate += stats->rate_counter[i] * (((2 * i + 1) << RATE_SHIFT) / 2);
+				total_kb += stats->rate_counter[i];
+				stats->rate_counter[i] = 0;
+			}
 		}
+		n += sprintf(buf + n, "\n");
+
+		for (i = 0; i < MAX_AHCI_SLOTS; i++) {
+			if (stats->pending_counter[i]) {
+				n += sprintf(buf + n, "%8d in [%2d]\n", stats->pending_counter[i], i);
+				stats->pending_counter[i] = 0;
+			}
+		}
+
+		if (total_kb) {
+			n += sprintf(buf + n, "Mean: %d MiB/s for %d MiB\n", mean_rate / total_kb, total_kb / 1024);
+			n += sprintf(buf + n, "Max issues in a row: %d\n\n", stats->nb_pending_max);
+		}
+
+		stats->nb_pending_max = 0;
 	}
-	if (ahci_qc_comp_counter[i]) {
-		sprintf(buf + n, "%d, in [> 512]ms\n",ahci_qc_comp_counter[i]);
-		n = strlen(buf);
-		ahci_qc_comp_counter[i] = 0;
-	}
+
 	return (n + 1);
 }
-KERNEL_ATTR_RO(comcerto_ahci_qc_comp_timing);
+KERNEL_ATTR_RO(comcerto_ahci_qc_rate);
 
 
 extern unsigned int ahci_qc_no_free_slot;
 static ssize_t comcerto_ahci_qc_no_free_slot_show(struct kobject *kobj,
 				  struct kobj_attribute *attr, char *buf)
 {
-	int n;
+	int n, p;
 
 	buf[0] = '\0';
 	n = 0;
-	sprintf(buf, "AHCI qc_no_free_slot count: %d\n", ahci_qc_no_free_slot);
-	ahci_qc_no_free_slot = 0;
 
-	n = strlen(buf);
+	for (p = 0; p < MAX_AHCI_PORTS; p++) {
+		struct ahci_port_stats *stats = &ahci_port_stats[p];
+
+		n += sprintf(buf + n, "AHCI Port %d no_free_slot count: %d\n", p, stats->no_free_slot);
+
+		stats->no_free_slot = 0;
+	}
 
 	return (n + 1);
 }
@@ -642,7 +680,7 @@
 	&comcerto_ahci_prof_enable_attr.attr,
 	&comcerto_ahci_timing_attr.attr,
 	&comcerto_ahci_data_attr.attr,
-	&comcerto_ahci_qc_comp_timing_attr.attr,
+	&comcerto_ahci_qc_rate_attr.attr,
 	&comcerto_ahci_qc_no_free_slot_attr.attr,
 #endif
 	NULL
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 36e0f09..bb998cc 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -15,6 +15,7 @@
 #include <linux/workqueue.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <mach/comcerto-2000/pm.h>
 
 #include "power.h"
 
@@ -306,6 +307,28 @@
 
 power_attr(state);
 
+#ifdef CONFIG_ARCH_M86XXX
+static ssize_t bitmask_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	unsigned int value = c2k_pm_bitmask_show();
+
+	return sprintf(buf, "%02x\n", value);
+}
+
+static ssize_t bitmask_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	unsigned long value;
+
+	value = simple_strtoul(buf, NULL, 16);
+	/* Store the bitmask value in the global variable */
+	c2k_pm_bitmask_store(value);
+	return n;
+}
+power_attr(bitmask);
+#endif
+
 #ifdef CONFIG_PM_SLEEP
 /*
  * The 'wakeup_count' attribute, along with the functions defined in
@@ -404,6 +427,9 @@
 
 static struct attribute * g[] = {
 	&state_attr.attr,
+#ifdef CONFIG_ARCH_M86XXX
+	&bitmask_attr.attr,
+#endif
 #ifdef CONFIG_PM_TRACE
 	&pm_trace_attr.attr,
 	&pm_trace_dev_match_attr.attr,
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index af48faa..f31d659 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -171,15 +171,16 @@
 
 	arch_suspend_disable_irqs();
 	BUG_ON(!irqs_disabled());
-
-	error = syscore_suspend();
+	/* FIXME syscore suspend/resume is not working for HGW build */
+	//error = syscore_suspend();
+	error = 0;
 	if (!error) {
 		*wakeup = pm_wakeup_pending();
 		if (!(suspend_test(TEST_CORE) || *wakeup)) {
 			error = suspend_ops->enter(state);
 			events_check_enabled = false;
 		}
-		syscore_resume();
+		//syscore_resume();
 	}
 
 	arch_suspend_enable_irqs();
diff --git a/mm/filemap.c b/mm/filemap.c
index 556858c..e310263 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -28,6 +28,9 @@
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
+#ifdef CONFIG_RAID_ZERO_COPY
+#include <linux/rmap.h>
+#endif
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/cpuset.h>
@@ -609,10 +612,27 @@
 		BUG();
 
 	smp_mb__after_clear_bit();
+
+#ifdef CONFIG_RAID_ZERO_COPY
+	clear_page_constant(page);
+#endif
+
 	wake_up_page(page, PG_writeback);
 }
 EXPORT_SYMBOL(end_page_writeback);
 
+#ifdef CONFIG_RAID_ZERO_COPY
+void clear_page_constant(struct page *page)
+{
+	if (PageConstant(page)) {
+		ClearPageConstant(page);
+		SetPageUptodate(page);
+	}
+}
+EXPORT_SYMBOL(clear_page_constant);
+#endif
+
+
 /**
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 065dbe8..b7b898d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1668,6 +1668,11 @@
 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
 				continue;
 
+#ifdef CONFIG_COMCERTO_ZONE_DMA_NCNB
+		if (!(gfp_mask & __GFP_DMA) && (zone_idx(zone) == ZONE_DMA))
+			continue;
+#endif
+
 		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
 			unsigned long mark;
diff --git a/net/bridge/br.c b/net/bridge/br.c
index f20c4fd..e92aa84 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -100,6 +100,60 @@
 	br_fdb_fini();
 }
 
+#if defined(CONFIG_ARCH_COMCERTO)
+static ATOMIC_NOTIFIER_HEAD(brevent_notif_chain);
+
+/**
+ *	register_brevent_notifier - register a bridge event notifier block
+ *	@nb: notifier
+ *
+ *	Register a notifier to be called when a bridge event occurs.
+ *	The notifier passed is linked into the kernel structures and must
+ *	not be reused until it has been unregistered. A negative errno code
+ *	is returned on a failure.
+ */
+int register_brevent_notifier(struct notifier_block *nb)
+{
+	int err;
+
+	err = atomic_notifier_chain_register(&brevent_notif_chain, nb);
+	return err;
+}
+
+/**
+ *	unregister_brevent_notifier - unregister a bridge event notifier block
+ *	@nb: notifier
+ *
+ *	Unregister a notifier previously registered by
+ *	register_brevent_notifier(). The notifier is unlinked from the
+ *	kernel structures and may then be reused. A negative errno code
+ *	is returned on a failure.
+ */
+
+int unregister_brevent_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&brevent_notif_chain, nb);
+}
+
+/**
+ *	call_brevent_notifiers - call all bridge event notifier blocks
+ *	@val: value passed unmodified to notifier function
+ *	@v:   pointer passed unmodified to notifier function
+ *
+ *	Call all bridge event notifier blocks.  Parameters and return value
+ *	are as for notifier_call_chain().
+ */
+
+int call_brevent_notifiers(unsigned long val, void *v)
+{
+	return atomic_notifier_call_chain(&brevent_notif_chain, val, v);
+}
+
+EXPORT_SYMBOL_GPL(register_brevent_notifier);
+EXPORT_SYMBOL_GPL(unregister_brevent_notifier);
+EXPORT_SYMBOL_GPL(call_brevent_notifiers);
+#endif
+
 module_init(br_init)
 module_exit(br_deinit)
 MODULE_LICENSE("GPL");
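
Usage sketch (not part of the patch; handler names illustrative): a module consuming the new chain registers a standard notifier_block and switches on the brevent_notif_type values declared in the if_bridge.h hunk above.

    #include <linux/if_bridge.h>
    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    static int my_brevent_cb(struct notifier_block *nb, unsigned long event, void *ptr)
    {
    	if (event == BREVENT_PORT_DOWN) {
    		struct net_device *dev = ptr;

    		pr_info("bridge port %s down\n", dev->name);
    	} else if (event == BREVENT_FDB_UPDATE) {
    		struct brevent_fdb_update *upd = ptr;

    		pr_info("fdb entry %pM now behind %s\n", upd->mac_addr, upd->dev->name);
    	}
    	return NOTIFY_DONE;
    }

    static struct notifier_block my_brevent_nb = {
    	.notifier_call = my_brevent_cb,
    };

    /* register_brevent_notifier(&my_brevent_nb) at module init,
     * unregister_brevent_notifier(&my_brevent_nb) at module exit. */
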
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c8e7861..3ae122f 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -18,6 +18,10 @@
 #include <linux/times.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#if defined(CONFIG_ARCH_COMCERTO)
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#endif
 #include <linux/jhash.h>
 #include <linux/random.h>
 #include <linux/slab.h>
@@ -32,6 +36,11 @@
 
 static u32 fdb_salt __read_mostly;
 
+#if defined(CONFIG_ARCH_COMCERTO)
+int (*br_fdb_can_expire)(unsigned char *mac_addr, struct net_device *dev) = NULL;
+DEFINE_SPINLOCK(br_fdb_cb_lock);
+#endif
+
 int __init br_fdb_init(void)
 {
 	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
@@ -42,6 +51,9 @@
 		return -ENOMEM;
 
 	get_random_bytes(&fdb_salt, sizeof(fdb_salt));
+#if defined(CONFIG_ARCH_COMCERTO)
+	spin_lock_init(&br_fdb_cb_lock);
+#endif
 	return 0;
 }
 
@@ -142,6 +154,15 @@
 			unsigned long this_timer;
 			if (f->is_static)
 				continue;
+#if defined(CONFIG_ARCH_COMCERTO)
+			spin_lock(&br_fdb_cb_lock);
+			if (br_fdb_can_expire && !(*br_fdb_can_expire)(f->addr.addr, f->dst->dev)) {
+				f->updated = jiffies;
+				spin_unlock(&br_fdb_cb_lock);
+				continue;
+			}
+			spin_unlock(&br_fdb_cb_lock);
+#endif
 			this_timer = f->updated + delay;
 			if (time_before_eq(this_timer, jiffies))
 				fdb_delete(f);
@@ -418,7 +439,19 @@
 					"own address as source address\n",
 					source->dev->name);
 		} else {
-			/* fastpath: update of existing entry */
+			/* fastpath: update of existing entry */
+#if defined(CONFIG_ARCH_COMCERTO)
+			if (fdb->dst != source) {
+				struct brevent_fdb_update fdb_update;
+
+				fdb_update.dev = source->dev;
+				fdb_update.mac_addr = fdb->addr.addr;
+				/* FIXME: also notify userspace via __rtmsg_ifinfo(RTM_NEWLINK, br->dev, 0, GFP_ATOMIC)? */
+				call_brevent_notifiers(BREVENT_FDB_UPDATE, &fdb_update);
+			}
+#endif
 			fdb->dst = source;
 			fdb->updated = jiffies;
 		}
@@ -434,6 +467,24 @@
 	}
 }
 
+#if defined(CONFIG_ARCH_COMCERTO)
+void br_fdb_register_can_expire_cb(int (*cb)(unsigned char *mac_addr, struct net_device *dev))
+{
+	spin_lock_bh(&br_fdb_cb_lock);
+	br_fdb_can_expire = cb;
+	spin_unlock_bh(&br_fdb_cb_lock);
+}
+EXPORT_SYMBOL(br_fdb_register_can_expire_cb);
+
+void br_fdb_deregister_can_expire_cb(void)
+{
+	spin_lock_bh(&br_fdb_cb_lock);
+	br_fdb_can_expire = NULL;
+	spin_unlock_bh(&br_fdb_cb_lock);
+}
+EXPORT_SYMBOL(br_fdb_deregister_can_expire_cb);
+#endif
+
 static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
 {
 	if (fdb->is_local)
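
Usage sketch (not part of the patch): an offload module that keeps forwarding entries alive in hardware can veto ageing of the corresponding bridge FDB entries. my_offload_entry_is_active() is a hypothetical stand-in for a fast-path lookup; returning 0 tells the bridge to refresh the entry instead of deleting it.

    /* hypothetical query into the offload fast path */
    extern bool my_offload_entry_is_active(const unsigned char *mac, struct net_device *dev);

    static int my_fdb_can_expire(unsigned char *mac_addr, struct net_device *dev)
    {
    	/* 0 => entry is still in use by the offload path, keep it */
    	return my_offload_entry_is_active(mac_addr, dev) ? 0 : 1;
    }

    /* br_fdb_register_can_expire_cb(my_fdb_can_expire) at module init,
     * br_fdb_deregister_can_expire_cb() at module exit. */
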
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 988c300..b457fd7 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -70,6 +70,10 @@
 
 	BR_INPUT_SKB_CB(skb)->brdev = br->dev;
 
+#if defined(CONFIG_ARCH_COMCERTO)
+	skb->cb[4] = 0;
+#endif
+
 	/* The packet skb2 goes to the local host (NULL to skip). */
 	skb2 = NULL;
 
@@ -108,6 +112,10 @@
 	if (skb) {
 		if (dst) {
 			dst->used = jiffies;
+#if defined(CONFIG_ARCH_COMCERTO)
+			/* Used by ABM module */
+			skb->cb[4] = 1;
+#endif
 			br_forward(dst->dst, skb, skb2);
 		} else
 			br_flood_forward(br, skb, skb2);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 4d6fb62..4c6dbbd 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -359,6 +359,10 @@
 extern void br_fdb_update(struct net_bridge *br,
 			  struct net_bridge_port *source,
 			  const unsigned char *addr);
+#if defined(CONFIG_ARCH_COMCERTO)
+extern void br_fdb_register_can_expire_cb(int (*cb)(unsigned char *mac_addr, struct net_device *dev));
+extern void br_fdb_deregister_can_expire_cb(void);
+#endif
 extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb);
 extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
 extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 19308e3..43d67aa 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -121,6 +121,10 @@
 
 	if (br_is_root_bridge(br) && !wasroot)
 		br_become_root_bridge(br);
+
+#if defined(CONFIG_ARCH_COMCERTO)
+	call_brevent_notifiers(BREVENT_PORT_DOWN, p->dev);
+#endif
 }
 
 static void br_stp_start(struct net_bridge *br)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 68bbf9f..624b6b8 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -128,6 +128,188 @@
 	goto out;
 }
 
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
+/*
+ *	skb_copy_datagram_to_kernel_iovec - Copy a datagram to a kernel iovec structure.
+ *	@skb: buffer to copy
+ *	@offset: offset in the buffer to start copying from
+ *	@to: io vector to copy to
+ *	@len: amount of data to copy from buffer to iovec
+ *
+ *	Note: the iovec is modified during the copy.
+ */
+
+#if defined(CONFIG_COMCERTO_SPLICE_USE_MDMA)
+int skb_copy_datagram_to_kernel_iovec_soft(const struct sk_buff *skb, int offset,
+				      struct iovec *to, int len)
+#else
+int skb_copy_datagram_to_kernel_iovec(const struct sk_buff *skb, int offset,
+				      struct iovec *to, int len)
+#endif
+{
+	int i, fraglen, end = 0;
+	struct sk_buff *next = skb_shinfo(skb)->frag_list;
+
+	if (!len)
+		return 0;
+
+next_skb:
+	fraglen = skb_headlen(skb);
+	i = -1;
+
+	while (1) {
+		int start = end;
+
+		if ((end += fraglen) > offset) {
+			int copy = end - offset;
+			int o = offset - start;
+
+			if (copy > len)
+				copy = len;
+			if (i == -1)
+				memcpy_tokerneliovec(to, skb->data + o, copy);
+			else {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+				struct page *page = skb_frag_page(frag);
+				void *p = kmap(page) + frag->page_offset + o;
+				memcpy_tokerneliovec(to, p, copy);
+				kunmap(page);
+			}
+
+			if (!(len -= copy))
+				return 0;
+			offset += copy;
+		}
+		if (++i >= skb_shinfo(skb)->nr_frags)
+			break;
+		fraglen = skb_shinfo(skb)->frags[i].size;
+	}
+	if (next) {
+		skb = next;
+		BUG_ON(skb_shinfo(skb)->frag_list);
+		next = skb->next;
+		goto next_skb;
+	}
+
+	return -EFAULT;
+}
+
+#if defined(CONFIG_COMCERTO_SPLICE_USE_MDMA)
+#include <mach/hardware.h>
+#include <mach/dma.h>
+int skb_copy_datagram_to_kernel_iovec(const struct sk_buff *skb, int offset,
+				      struct iovec *to, int len)
+{
+	int i, ret, fraglen, copy, o, end = 0;
+	struct sk_buff *next = skb_shinfo(skb)->frag_list;
+
+	struct comcerto_dma_sg *sg;
+	unsigned int size;
+	int total_len, input_len;
+	if (!len)
+		return 0;
+
+	total_len = len;
+	
+	size = sizeof(struct comcerto_dma_sg);
+	sg = kmalloc(size, GFP_ATOMIC);
+	if (!sg)
+		return skb_copy_datagram_to_kernel_iovec_soft(skb, offset, to, len);
+
+	comcerto_dma_sg_init(sg);
+
+next_skb:
+	fraglen = skb_headlen(skb);
+	i = -1;
+
+	while (1) {
+		int start = end;
+
+		if ((end += fraglen) > offset) {
+current_frag:
+			copy = end - offset;
+			o = offset - start;
+
+			if (copy > len)
+				copy = len;
+			
+			// preparing input
+			if (i == -1) {
+				ret = comcerto_dma_sg_add_input(sg, skb->data + o, copy, 0);
+			} else {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+				struct page *page = skb_frag_page(frag);
+				void *p = page_address(page) + frag->page_offset + o;
+				ret = comcerto_dma_sg_add_input(sg, p, copy, 0);
+			}
+			if (likely(ret == 0)) {
+				len -= copy;
+				offset += copy;
+			}
+
+			if ((len == 0) || unlikely(ret))
+			{
+				input_len = total_len - len;
+				len = input_len;
+				//preparing output
+				while (len > 0) {
+					if (to->iov_len) {
+						int copy = min_t(unsigned int, to->iov_len, len);
+
+						ret = comcerto_dma_sg_add_output(sg, to->iov_base, copy, 1);
+						if (unlikely(ret)) {
+							/* no clean way out, but this should never happen the way
+							 * skb_copy_datagram_to_kernel_iovec is called currently.
+							 */
+							comcerto_dma_sg_cleanup(sg, input_len);
+							kfree(sg);
+							return -EFAULT;
+						}
+						len -= copy;
+						to->iov_base += copy;
+						to->iov_len -= copy;
+					}
+					if (to->iov_len == 0)
+						to++;
+				}
+
+				//let's run the dma operation
+				comcerto_dma_get();
+				comcerto_dma_sg_setup(sg, input_len);
+				comcerto_dma_start();
+				comcerto_dma_wait();
+				comcerto_dma_put();
+				comcerto_dma_sg_cleanup(sg, input_len);
+
+				total_len = total_len - input_len;
+				if (total_len) { /* last input fragment failed, add it again */
+					comcerto_dma_sg_init(sg);
+					goto current_frag;
+				} else { /* everything copied, exit successfully */
+					kfree(sg);
+					return 0;
+				}
+
+			}
+		}
+		if (++i >= skb_shinfo(skb)->nr_frags)
+			break;
+		fraglen = skb_shinfo(skb)->frags[i].size;
+	}
+	if (next) {
+		skb = next;
+		BUG_ON(skb_shinfo(skb)->frag_list);
+		next = skb->next;
+		goto next_skb;
+	}
+
+	comcerto_dma_sg_cleanup(sg, total_len - len);
+	kfree(sg);
+	return -EFAULT;
+}
+#endif
+#endif
+
 /**
  *	__skb_recv_datagram - Receive a datagram skbuff
  *	@sk: socket
diff --git a/net/core/dev.c b/net/core/dev.c
index 0308601..35ac2f6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2460,6 +2460,16 @@
  */
 int dev_queue_xmit(struct sk_buff *skb)
 {
+#if defined(CONFIG_ARCH_COMCERTO)
+	if (skb->dev->flags & IFF_WIFI_OFLD)
+		skb->dev = skb->dev->wifi_offload_dev;
+
+	return original_dev_queue_xmit(skb);
+}
+
+int original_dev_queue_xmit(struct sk_buff *skb)
+{
+#endif
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
 	struct Qdisc *q;
@@ -2539,6 +2549,10 @@
 }
 EXPORT_SYMBOL(dev_queue_xmit);
 
+#if defined(CONFIG_ARCH_COMCERTO)
+EXPORT_SYMBOL(original_dev_queue_xmit);
+#endif
+
 
 /*=======================================================================
 			Receiver routines
diff --git a/net/core/flow.c b/net/core/flow.c
index 9287067..a0e9eaa 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -51,6 +51,9 @@
 	u32				hash_rnd;
 	int				hash_rnd_recalc;
 	struct tasklet_struct		flush_tasklet;
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+	struct tasklet_struct		flowcache_rem_tasklet;
+#endif
 };
 
 struct flow_flush_info {
@@ -68,6 +71,16 @@
 	struct timer_list		rnd_timer;
 };
 
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+struct flow_remove_info {
+	struct flow_cache		*cache;
+	struct flowi			*key;
+	unsigned short			family;
+	unsigned short			dir;
+	atomic_t			cpuleft;
+	struct completion		completion;
+};
+#endif
 atomic_t flow_cache_genid = ATOMIC_INIT(0);
 EXPORT_SYMBOL(flow_cache_genid);
 static struct flow_cache flow_cache_global;
@@ -394,17 +407,31 @@
 }
 
 #if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
-void flow_cache_remove(const struct flowi *key, 
-			unsigned short family, unsigned short dir)
+static void flow_cache_remove_per_cpu(void *data)
 {
+	struct flow_remove_info *info = data;
+	int cpu;
+	struct tasklet_struct *tasklet;
+
+	cpu = smp_processor_id();
+	tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flowcache_rem_tasklet;
+	tasklet->data = (unsigned long)info;
+	tasklet_schedule(tasklet);
+}
+
+void flow_cache_remove_tasklet(unsigned long data)
+{
+	struct flow_remove_info *info = (struct flow_remove_info *)data;
 	struct flow_cache *fc = &flow_cache_global;
 	struct flow_cache_percpu *fcp;
 	struct flow_cache_entry *fle;
 	struct hlist_node *entry;
 	size_t keysize;
 	unsigned int hash;
+	struct flowi *key = info->key;
+	unsigned short family = info->family;
+	unsigned short dir = info->dir;
 
-	local_bh_disable();
 	fcp = this_cpu_ptr(fc->percpu);
 	
 	keysize = flow_key_size(family);
@@ -422,8 +449,38 @@
 	}
 		
 nocache:	
-	local_bh_enable();
+	if (atomic_dec_and_test(&info->cpuleft))
+		complete(&info->completion);
+	return;
 }
+
+void flow_cache_remove(const struct flowi *key,
+			unsigned short family, unsigned short dir)
+{
+	struct flow_remove_info info;
+	static DEFINE_MUTEX(flow_rem_sem);
+
+	/* Don't want cpus going down or up during this. */
+	get_online_cpus();
+	mutex_lock(&flow_rem_sem);
+	info.cache = &flow_cache_global;
+	info.key = (struct flowi*)key;
+	//memcpy(&info.key, key, sizeof(struct flowi));
+	info.family = family;
+	info.dir = dir;
+	atomic_set(&info.cpuleft, num_online_cpus());
+	init_completion(&info.completion);
+
+	local_bh_disable();
+	smp_call_function(flow_cache_remove_per_cpu, &info, 1);
+	flow_cache_remove_tasklet((unsigned long)&info);
+	local_bh_enable();
+
+	wait_for_completion(&info.completion);
+	mutex_unlock(&flow_rem_sem);
+	put_online_cpus();
+}
+
 #endif
 
 static void flow_cache_flush_task(struct work_struct *work)
@@ -452,6 +509,9 @@
 		fcp->hash_rnd_recalc = 1;
 		fcp->hash_count = 0;
 		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+		tasklet_init(&fcp->flowcache_rem_tasklet, flow_cache_remove_tasklet, 0);
+#endif
 	}
 	return 0;
 }
diff --git a/net/core/iovec.c b/net/core/iovec.c
index c40f27e..6665c0c 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -124,6 +124,28 @@
 }
 EXPORT_SYMBOL(memcpy_toiovecend);
 
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
+/*
+ *	In-kernel copy to iovec; the destination iovec must cover at least len bytes.
+ *
+ *	Note: this modifies the original iovec.
+ */
+void memcpy_tokerneliovec(struct iovec *iov, unsigned char *kdata, int len)
+{
+	while (len > 0) {
+		if (iov->iov_len) {
+			int copy = min_t(unsigned int, iov->iov_len, len);
+			memcpy(iov->iov_base, kdata, copy);
+			len -= copy;
+			kdata += copy;
+			iov->iov_base += copy;
+			iov->iov_len -= copy;
+		}
+		iov++;
+	}
+}
+#endif
+
 /*
  *	Copy iovec to kernel. Returns -EFAULT on error.
  *
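
Usage sketch (not part of the patch; buffer names illustrative): the helper walks and consumes the destination iovec in place, so the caller must pass writable entries covering at least len bytes.

    static void copy_into_two_kernel_buffers(unsigned char *src, int len,
    					 void *dst0, size_t len0,
    					 void *dst1, size_t len1)
    {
    	struct iovec iov[2] = {
    		{ .iov_base = dst0, .iov_len = len0 },
    		{ .iov_base = dst1, .iov_len = len1 },
    	};

    	/* caller guarantees len <= len0 + len1; iov[] is advanced in place */
    	memcpy_tokerneliovec(iov, src, len);
    }
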
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9fcb858..403a253 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1935,14 +1935,14 @@
 	return skb->len;
 }
 
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
+void __rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags)
 {
 	struct net *net = dev_net(dev);
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 	size_t if_info_size;
 
-	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
+	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), flags);
 	if (skb == NULL)
 		goto errout;
 
@@ -1955,12 +1955,20 @@
 		kfree_skb(skb);
 		goto errout;
 	}
-	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
+	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
 	return;
 errout:
 	if (err < 0)
 		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
 }
+#if defined(CONFIG_ARCH_COMCERTO)
+EXPORT_SYMBOL(__rtmsg_ifinfo);
+#endif
+
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
+{
+	__rtmsg_ifinfo(type, dev, change, GFP_KERNEL);
+}
 #if defined(CONFIG_COMCERTO_FP)
 EXPORT_SYMBOL(rtmsg_ifinfo);
 #endif
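
Usage sketch (not part of the patch): the new __rtmsg_ifinfo() variant only adds an explicit gfp_t, so callers that cannot sleep (such as the bridge FDB update path noted above) can request GFP_ATOMIC.

    static void my_notify_link_change(struct net_device *dev)
    {
    	/* equivalent to rtmsg_ifinfo(RTM_NEWLINK, dev, 0), but atomic-safe */
    	__rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_ATOMIC);
    }
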
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7cb4644..9b9be63 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -687,6 +687,24 @@
 {
 #define C(x) n->x = skb->x
 
+#if defined(CONFIG_COMCERTO_CUSTOM_SKB_LAYOUT)
+	if (skb->mspd_data) {
+		if (skb->mspd_len) {
+			int ofst = skb->len - skb->mspd_len;
+
+			memcpy(skb->data + ofst, skb->mspd_data + skb->mspd_ofst, skb->mspd_len);
+			skb->mspd_len = 0;
+		}
+
+		WARN_ON(skb_shared(skb));
+
+		if (!skb_shared(skb)) {
+			kfree(skb->mspd_data);
+			skb->mspd_data = NULL;
+		}
+	}
+#endif
+
 	n->next = n->prev = NULL;
 	n->sk = NULL;
 	__copy_skb_header(n, skb);
@@ -706,6 +724,7 @@
 	atomic_set(&n->users, 1);
 
 #if defined(CONFIG_COMCERTO_CUSTOM_SKB_LAYOUT)
+	WARN_ON(skb->mspd_data);
 	C(mspd_data);
 	C(mspd_len);
 	C(mspd_ofst);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cf39c0f..132e9fd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1516,6 +1516,23 @@
 	do {
 		u32 offset;
 
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
+		if (flags & MSG_NOCATCHSIG) {
+			if (signal_pending(current)) {
+				if (sigismember(&current->pending.signal, SIGQUIT) ||
+				    sigismember(&current->pending.signal, SIGABRT) ||
+				    sigismember(&current->pending.signal, SIGKILL) ||
+				    sigismember(&current->pending.signal, SIGTERM) ||
+				    sigismember(&current->pending.signal, SIGSTOP)) {
+
+					if (copied)
+						break;
+					copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
+					break;
+				}
+			}
+		} else
+#endif
 		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
 		if (tp->urg_data && tp->urg_seq == *seq) {
 			if (copied)
@@ -1744,8 +1761,16 @@
 			} else
 #endif
 			{
-				err = skb_copy_datagram_iovec(skb, offset,
-						msg->msg_iov, used);
+#if defined(CONFIG_COMCERTO_IMPROVED_SPLICE)
+				if (msg->msg_flags & MSG_KERNSPACE) {
+					err = skb_copy_datagram_to_kernel_iovec(skb,
+							offset, msg->msg_iov, used);
+				} else
+#endif
+					err = skb_copy_datagram_iovec(skb, offset,
+							msg->msg_iov, used);
 				if (err) {
 					/* Exception. Bailout! */
 					if (!copied)
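
Usage sketch (not part of the patch, and only an approximation of the real splice-from-socket caller): a kernel-side receiver sets MSG_KERNSPACE in msg_flags so tcp_recvmsg() selects the kernel-iovec copy routine above, and passes MSG_NOCATCHSIG so only fatal signals interrupt the copy.

    /* hypothetical caller, e.g. inside a splice_from_socket implementation */
    static int my_recv_into_kernel_buf(struct socket *sock, void *buf, size_t len)
    {
    	struct kvec vec = { .iov_base = buf, .iov_len = len };
    	struct msghdr msg = { .msg_flags = MSG_KERNSPACE };

    	return kernel_recvmsg(sock, &msg, &vec, 1, len,
    			      MSG_KERNSPACE | MSG_NOCATCHSIG);
    }
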
diff --git a/net/ipv6/ethipip6.c b/net/ipv6/ethipip6.c
index 34f45c6..c1765c7 100644
--- a/net/ipv6/ethipip6.c
+++ b/net/ipv6/ethipip6.c
@@ -889,8 +889,15 @@
         } else {
 		dst = ip6_route_output(net, NULL, &fl->u.ip6);
 
-		if (dst->error || xfrm_lookup(net, dst, fl, NULL, 0) < 0)
+		if (dst->error)
 			goto tx_err_link_failure;
+		dst = xfrm_lookup(net, dst, fl, NULL, 0);
+		if (IS_ERR(dst)) {
+			err = PTR_ERR(dst);
+			dst = NULL;
+			goto tx_err_link_failure;
+		}
 #if defined(CONFIG_INET6_IPSEC_OFFLOAD)
 		t->genid = atomic_read(&flow_cache_genid);
 #endif
@@ -947,6 +954,7 @@
 	skb_dst_set(skb, dst_clone(dst));
 
 	skb->transport_header = skb->network_header;
+	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 
 	etherip_ver  = (__u16 *)skb_push(skb, ETH_IPHLEN);
 	*etherip_ver = htons(ETHERIP_VERSION);
@@ -968,6 +976,7 @@
 	ipv6_addr_copy(&ipv6h->daddr, &fl->u.ip6.daddr);
 	nf_reset(skb);
 	pkt_len = skb->len;
+	skb->local_df = 1;
 	err = ip6_local_out(skb);
 
 	if (net_xmit_eval(err) == 0) {
@@ -1122,6 +1131,8 @@
 	struct net_device *dev = t->dev;
 	struct ip6_tnl_parm *p = &t->parms;
 	struct flowi *fl = &t->fl;
+	struct net_device *ldev = NULL;
+	struct net *net = dev_net(t->dev);
 
 	memcpy(dev->dev_addr, &p->laddr, dev->addr_len);
 	/* Make sure that dev_addr is nither mcast nor all zeros */
@@ -1150,6 +1161,17 @@
 
 	dev->iflink = p->link;
 
+	/* Initialize the default MTU of the tunnel with its parent
+	 * interface's MTU */
+	rcu_read_lock();
+	if (p->link) {
+		ldev = dev_get_by_index_rcu(net, p->link);
+		if (ldev)
+			dev->mtu = ldev->mtu;
+	}
+	rcu_read_unlock();
+
 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
 		int strict = (ipv6_addr_type(&p->raddr) &
 			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index ebbb836..2698150 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -58,7 +58,7 @@
 		if (pfx_len - i >= 32)
 			mask = 0;
 		else
-			mask = htonl(~((1 << (pfx_len - i)) - 1));
+			mask = htonl((1 << (i - pfx_len + 32)) - 1);
 
 		idx = i / 32;
 		addr->s6_addr32[idx] &= mask;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index d879f7e..aa83e48 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -123,11 +123,17 @@
 {
 	struct flowi6 *fl6 = &fl->u.ip6;
 	int onlyproto = 0;
-	u16 offset = skb_network_header_len(skb);
-	const struct ipv6hdr *hdr = ipv6_hdr(skb);
+	/* use the reassembled packet if conntrack has done the reassembly */
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+	struct sk_buff *whole_skb = (skb->nfct_reasm) ? skb->nfct_reasm : skb;
+#else
+	struct sk_buff *whole_skb = skb;
+#endif
+	u16 offset = skb_network_header_len(whole_skb);
+	const struct ipv6hdr *hdr = ipv6_hdr(whole_skb);
 	struct ipv6_opt_hdr *exthdr;
-	const unsigned char *nh = skb_network_header(skb);
-	u8 nexthdr = nh[IP6CB(skb)->nhoff];
+	const unsigned char *nh = skb_network_header(whole_skb);
+	u8 nexthdr = nh[IP6CB(whole_skb)->nhoff];
 
 	memset(fl6, 0, sizeof(struct flowi6));
 	fl6->flowi6_mark = skb->mark;
@@ -135,9 +141,9 @@
 	ipv6_addr_copy(&fl6->daddr, reverse ? &hdr->saddr : &hdr->daddr);
 	ipv6_addr_copy(&fl6->saddr, reverse ? &hdr->daddr : &hdr->saddr);
 
-	while (nh + offset + 1 < skb->data ||
-	       pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
-		nh = skb_network_header(skb);
+	while (nh + offset + 1 < whole_skb->data ||
+		pskb_may_pull(whole_skb, nh + offset + 1 - whole_skb->data)) {
+		nh = skb_network_header(whole_skb);
 		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
 
 		switch (nexthdr) {
@@ -156,8 +162,8 @@
 		case IPPROTO_TCP:
 		case IPPROTO_SCTP:
 		case IPPROTO_DCCP:
-			if (!onlyproto && (nh + offset + 4 < skb->data ||
-			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
+			if (!onlyproto && (nh + offset + 4 < whole_skb->data ||
+			pskb_may_pull(whole_skb, nh + offset + 4 - whole_skb->data))) {
 				__be16 *ports = (__be16 *)exthdr;
 
 				fl6->fl6_sport = ports[!!reverse];
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 89ff8c6..7501b22 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1253,11 +1253,10 @@
 	/* Remove from tunnel list */
 	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
 	list_del_rcu(&tunnel->list);
+	kfree_rcu(tunnel, rcu);
 	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-	synchronize_rcu();
 
 	atomic_dec(&l2tp_tunnel_count);
-	kfree(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a16a48e..4393794 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -157,6 +157,7 @@
 
 struct l2tp_tunnel {
 	int			magic;		/* Should be L2TP_TUNNEL_MAGIC */
+	struct rcu_head rcu;
 	rwlock_t		hlist_lock;	/* protect session_hlist */
 	struct hlist_head	session_hlist[L2TP_HASH_SIZE];
 						/* hashed list of sessions,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3d7fe03..3d3622ac 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -255,12 +255,15 @@
 {
 	struct nf_conn *ct = (void *)ul_conntrack;
 	struct net *net = nf_ct_net(ct);
+	struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+	BUG_ON(ecache == NULL);
 
 	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
 		/* bad luck, let's retry again */
-		ct->timeout.expires = jiffies +
+		ecache->timeout.expires = jiffies +
 			(random32() % net->ct.sysctl_events_retry_timeout);
-		add_timer(&ct->timeout);
+		add_timer(&ecache->timeout);
 		return;
 	}
 	/* we've got the event delivered, now it's dying */
@@ -274,6 +277,9 @@
 void nf_ct_insert_dying_list(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
+	struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+	BUG_ON(ecache == NULL);
 
 	/* add this conntrack to the dying list */
 	spin_lock_bh(&nf_conntrack_lock);
@@ -281,10 +287,10 @@
 			     &net->ct.dying);
 	spin_unlock_bh(&nf_conntrack_lock);
 	/* set a new timer to retry event delivery */
-	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
-	ct->timeout.expires = jiffies +
+	setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+	ecache->timeout.expires = jiffies +
 		(random32() % net->ct.sysctl_events_retry_timeout);
-	add_timer(&ct->timeout);
+	add_timer(&ecache->timeout);
 }
 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
 
@@ -644,8 +650,12 @@
 	if (del_timer(&ct->timeout)) {
 #endif
 		death_by_timeout((unsigned long)ct);
-		dropped = 1;
-		NF_CT_STAT_INC_ATOMIC(net, early_drop);
+		/* Check if we indeed killed this entry. Reliable event
+		   delivery may have inserted it into the dying list. */
+		if (test_bit(IPS_DYING_BIT, &ct->status)) {
+			dropped = 1;
+			NF_CT_STAT_INC_ATOMIC(net, early_drop);
+		}
 	}
 	nf_ct_put(ct);
 	return dropped;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 2c48c8f..0683213 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1123,7 +1123,36 @@
 	ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
 	return 0;
 }
+#if defined(CONFIG_COMCERTO_FP)
+/*
+ * This function detects ctnetlink messages that request setting
+ * the conntrack status to IPS_PERMANENT. It updates only this bit,
+ * regardless of other possible changes.
+ * Returns 0 on success.
+ */
+static int
+ctnetlink_change_permanent(struct nf_conn *ct, const struct nlattr * const cda[])
+{
+	unsigned int status;
+	u_int32_t id;
 
+	if (cda[CTA_STATUS] && cda[CTA_ID]) {
+		status = ntohl(nla_get_be32(cda[CTA_STATUS]));
+		id = ntohl(nla_get_be32(cda[CTA_ID]));
+
+		if (status & IPS_PERMANENT) {
+			if ((u32)(unsigned long)ct == id) {
+				ct->status |= IPS_PERMANENT;
+				return 0;
+			} else
+				return -ENOENT;
+		}
+	}
+	return -1;
+}
+#endif
 static int
 ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 {
@@ -1594,6 +1623,13 @@
 	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
 		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 
+#if defined(CONFIG_COMCERTO_FP)
+		/* If the permanent status has been set, this is a specific
+		 * message. Don't broadcast the event and don't update the ct */
+		err = ctnetlink_change_permanent(ct, cda);
+		if ((err == 0) || (err == -ENOENT))
+			goto out_unlock;
+#endif
 		err = ctnetlink_change_conntrack(ct, cda);
 		if (err == 0) {
 			nf_conntrack_get(&ct->ct_general);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 621e2da..0758b88 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -223,6 +223,12 @@
 		if (seq_printf(s, "[ASSURED] "))
 			goto release;
 
+#ifdef CONFIG_COMCERTO_FP
+	if (test_bit(IPS_PERMANENT_BIT, &ct->status))
+		if (seq_printf(s, "[PERMANENT] "))
+			goto release;
+#endif
+
 #if defined(CONFIG_NF_CONNTRACK_MARK)
 	if (seq_printf(s, "mark=%u ", ct->mark))
 		goto release;